]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-2.6.39.4-201108172006.patch
Auto commit, 1 new patch.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.39.4-201108172006.patch
1 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/elf.h linux-2.6.39.4/arch/alpha/include/asm/elf.h
2 --- linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
3 +++ linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/pgtable.h linux-2.6.39.4/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
20 +++ linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.39.4/arch/alpha/kernel/module.c linux-2.6.39.4/arch/alpha/kernel/module.c
40 --- linux-2.6.39.4/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
41 +++ linux-2.6.39.4/arch/alpha/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.39.4/arch/alpha/kernel/osf_sys.c linux-2.6.39.4/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 21:11:51.000000000 -0400
53 +++ linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 19:44:33.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.39.4/arch/alpha/mm/fault.c linux-2.6.39.4/arch/alpha/mm/fault.c
86 --- linux-2.6.39.4/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
87 +++ linux-2.6.39.4/arch/alpha/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.39.4/arch/arm/include/asm/elf.h linux-2.6.39.4/arch/arm/include/asm/elf.h
245 --- linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
246 +++ linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
247 @@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-2.6.39.4/arch/arm/include/asm/kmap_types.h linux-2.6.39.4/arch/arm/include/asm/kmap_types.h
275 --- linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
276 +++ linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-2.6.39.4/arch/arm/include/asm/uaccess.h linux-2.6.39.4/arch/arm/include/asm/uaccess.h
286 --- linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
287 +++ linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-2.6.39.4/arch/arm/kernel/armksyms.c linux-2.6.39.4/arch/arm/kernel/armksyms.c
344 --- linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-05-19 00:06:34.000000000 -0400
345 +++ linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-08-05 19:44:33.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-2.6.39.4/arch/arm/kernel/process.c linux-2.6.39.4/arch/arm/kernel/process.c
358 --- linux-2.6.39.4/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
359 +++ linux-2.6.39.4/arch/arm/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-2.6.39.4/arch/arm/kernel/traps.c linux-2.6.39.4/arch/arm/kernel/traps.c
382 --- linux-2.6.39.4/arch/arm/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
383 +++ linux-2.6.39.4/arch/arm/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
384 @@ -258,6 +258,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -285,6 +287,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_from_user.S linux-2.6.39.4/arch/arm/lib/copy_from_user.S
404 --- linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-05-19 00:06:34.000000000 -0400
405 +++ linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-08-05 19:44:33.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_to_user.S linux-2.6.39.4/arch/arm/lib/copy_to_user.S
430 --- linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-05-19 00:06:34.000000000 -0400
431 +++ linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-08-05 19:44:33.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess.S linux-2.6.39.4/arch/arm/lib/uaccess.S
456 --- linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-05-19 00:06:34.000000000 -0400
457 +++ linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-08-05 19:44:33.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-05-19 00:06:34.000000000 -0400
513 +++ linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-05 19:44:33.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 00:06:34.000000000 -0400
525 +++ linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-05 19:44:33.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-2.6.39.4/arch/arm/mm/fault.c linux-2.6.39.4/arch/arm/mm/fault.c
536 --- linux-2.6.39.4/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
537 +++ linux-2.6.39.4/arch/arm/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-2.6.39.4/arch/arm/mm/mmap.c linux-2.6.39.4/arch/arm/mm/mmap.c
587 --- linux-2.6.39.4/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
588 +++ linux-2.6.39.4/arch/arm/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/elf.h linux-2.6.39.4/arch/avr32/include/asm/elf.h
639 --- linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
640 +++ linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
659 +++ linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-2.6.39.4/arch/avr32/mm/fault.c linux-2.6.39.4/arch/avr32/mm/fault.c
671 --- linux-2.6.39.4/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
672 +++ linux-2.6.39.4/arch/avr32/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-2.6.39.4/arch/frv/include/asm/kmap_types.h linux-2.6.39.4/arch/frv/include/asm/kmap_types.h
715 --- linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
716 +++ linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-2.6.39.4/arch/frv/mm/elf-fdpic.c linux-2.6.39.4/arch/frv/mm/elf-fdpic.c
726 --- linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400
727 +++ linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-08-05 19:44:33.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/elf.h linux-2.6.39.4/arch/ia64/include/asm/elf.h
757 --- linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
758 +++ linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/pgtable.h linux-2.6.39.4/arch/ia64/include/asm/pgtable.h
774 --- linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
775 +++ linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/spinlock.h linux-2.6.39.4/arch/ia64/include/asm/spinlock.h
804 --- linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
805 +++ linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/uaccess.h linux-2.6.39.4/arch/ia64/include/asm/uaccess.h
816 --- linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
817 +++ linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-2.6.39.4/arch/ia64/kernel/module.c linux-2.6.39.4/arch/ia64/kernel/module.c
837 --- linux-2.6.39.4/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
838 +++ linux-2.6.39.4/arch/ia64/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400
929 +++ linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-08-05 19:44:33.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
964 +++ linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-05 19:44:33.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-2.6.39.4/arch/ia64/mm/fault.c linux-2.6.39.4/arch/ia64/mm/fault.c
975 --- linux-2.6.39.4/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
976 +++ linux-2.6.39.4/arch/ia64/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
977 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
1028 +++ linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-2.6.39.4/arch/ia64/mm/init.c linux-2.6.39.4/arch/ia64/mm/init.c
1039 --- linux-2.6.39.4/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400
1040 +++ linux-2.6.39.4/arch/ia64/mm/init.c 2011-08-05 19:44:33.000000000 -0400
1041 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-2.6.39.4/arch/m32r/lib/usercopy.c linux-2.6.39.4/arch/m32r/lib/usercopy.c
1062 --- linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400
1063 +++ linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-08-05 19:44:33.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-2.6.39.4/arch/mips/include/asm/elf.h linux-2.6.39.4/arch/mips/include/asm/elf.h
1085 --- linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1086 +++ linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-2.6.39.4/arch/mips/include/asm/page.h linux-2.6.39.4/arch/mips/include/asm/page.h
1109 --- linux-2.6.39.4/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1110 +++ linux-2.6.39.4/arch/mips/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-2.6.39.4/arch/mips/include/asm/system.h linux-2.6.39.4/arch/mips/include/asm/system.h
1121 --- linux-2.6.39.4/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1122 +++ linux-2.6.39.4/arch/mips/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400
1133 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-05 19:44:33.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400
1150 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-05 19:44:33.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-2.6.39.4/arch/mips/kernel/process.c linux-2.6.39.4/arch/mips/kernel/process.c
1166 --- linux-2.6.39.4/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
1167 +++ linux-2.6.39.4/arch/mips/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-2.6.39.4/arch/mips/kernel/syscall.c linux-2.6.39.4/arch/mips/kernel/syscall.c
1185 --- linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400
1186 +++ linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-08-05 19:44:33.000000000 -0400
1187 @@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
1188 do_color_align = 0;
1189 if (filp || (flags & MAP_SHARED))
1190 do_color_align = 1;
1191 +
1192 +#ifdef CONFIG_PAX_RANDMMAP
1193 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1194 +#endif
1195 +
1196 if (addr) {
1197 if (do_color_align)
1198 addr = COLOUR_ALIGN(addr, pgoff);
1199 else
1200 addr = PAGE_ALIGN(addr);
1201 vmm = find_vma(current->mm, addr);
1202 - if (task_size - len >= addr &&
1203 - (!vmm || addr + len <= vmm->vm_start))
1204 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1205 return addr;
1206 }
1207 addr = current->mm->mmap_base;
1208 @@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
1209 /* At this point: (!vmm || addr < vmm->vm_end). */
1210 if (task_size - len < addr)
1211 return -ENOMEM;
1212 - if (!vmm || addr + len <= vmm->vm_start)
1213 + if (check_heap_stack_gap(vmm, addr, len))
1214 return addr;
1215 addr = vmm->vm_end;
1216 if (do_color_align)
1217 @@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str
1218 mm->unmap_area = arch_unmap_area;
1219 }
1220
1221 -static inline unsigned long brk_rnd(void)
1222 -{
1223 - unsigned long rnd = get_random_int();
1224 -
1225 - rnd = rnd << PAGE_SHIFT;
1226 - /* 8MB for 32bit, 256MB for 64bit */
1227 - if (TASK_IS_32BIT_ADDR)
1228 - rnd = rnd & 0x7ffffful;
1229 - else
1230 - rnd = rnd & 0xffffffful;
1231 -
1232 - return rnd;
1233 -}
1234 -
1235 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1236 -{
1237 - unsigned long base = mm->brk;
1238 - unsigned long ret;
1239 -
1240 - ret = PAGE_ALIGN(base + brk_rnd());
1241 -
1242 - if (ret < mm->brk)
1243 - return mm->brk;
1244 -
1245 - return ret;
1246 -}
1247 -
1248 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1249 unsigned long, prot, unsigned long, flags, unsigned long,
1250 fd, off_t, offset)
1251 diff -urNp linux-2.6.39.4/arch/mips/mm/fault.c linux-2.6.39.4/arch/mips/mm/fault.c
1252 --- linux-2.6.39.4/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1253 +++ linux-2.6.39.4/arch/mips/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1254 @@ -28,6 +28,23 @@
1255 #include <asm/highmem.h> /* For VMALLOC_END */
1256 #include <linux/kdebug.h>
1257
1258 +#ifdef CONFIG_PAX_PAGEEXEC
1259 +void pax_report_insns(void *pc, void *sp)
1260 +{
1261 + unsigned long i;
1262 +
1263 + printk(KERN_ERR "PAX: bytes at PC: ");
1264 + for (i = 0; i < 5; i++) {
1265 + unsigned int c;
1266 + if (get_user(c, (unsigned int *)pc+i))
1267 + printk(KERN_CONT "???????? ");
1268 + else
1269 + printk(KERN_CONT "%08x ", c);
1270 + }
1271 + printk("\n");
1272 +}
1273 +#endif
1274 +
1275 /*
1276 * This routine handles page faults. It determines the address,
1277 * and the problem, and then passes it off to one of the appropriate
1278 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/elf.h linux-2.6.39.4/arch/parisc/include/asm/elf.h
1279 --- linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1280 +++ linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1281 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1282
1283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1284
1285 +#ifdef CONFIG_PAX_ASLR
1286 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1287 +
1288 +#define PAX_DELTA_MMAP_LEN 16
1289 +#define PAX_DELTA_STACK_LEN 16
1290 +#endif
1291 +
1292 /* This yields a mask that user programs can use to figure out what
1293 instruction set this CPU supports. This could be done in user space,
1294 but it's not easy, and we've already done it here. */
1295 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/pgtable.h linux-2.6.39.4/arch/parisc/include/asm/pgtable.h
1296 --- linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1297 +++ linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1298 @@ -207,6 +207,17 @@ struct vm_area_struct;
1299 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1300 #define PAGE_COPY PAGE_EXECREAD
1301 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1302 +
1303 +#ifdef CONFIG_PAX_PAGEEXEC
1304 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1305 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1306 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1307 +#else
1308 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1309 +# define PAGE_COPY_NOEXEC PAGE_COPY
1310 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1311 +#endif
1312 +
1313 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1314 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1315 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1316 diff -urNp linux-2.6.39.4/arch/parisc/kernel/module.c linux-2.6.39.4/arch/parisc/kernel/module.c
1317 --- linux-2.6.39.4/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
1318 +++ linux-2.6.39.4/arch/parisc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
1319 @@ -96,16 +96,38 @@
1320
1321 /* three functions to determine where in the module core
1322 * or init pieces the location is */
1323 +static inline int in_init_rx(struct module *me, void *loc)
1324 +{
1325 + return (loc >= me->module_init_rx &&
1326 + loc < (me->module_init_rx + me->init_size_rx));
1327 +}
1328 +
1329 +static inline int in_init_rw(struct module *me, void *loc)
1330 +{
1331 + return (loc >= me->module_init_rw &&
1332 + loc < (me->module_init_rw + me->init_size_rw));
1333 +}
1334 +
1335 static inline int in_init(struct module *me, void *loc)
1336 {
1337 - return (loc >= me->module_init &&
1338 - loc <= (me->module_init + me->init_size));
1339 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1340 +}
1341 +
1342 +static inline int in_core_rx(struct module *me, void *loc)
1343 +{
1344 + return (loc >= me->module_core_rx &&
1345 + loc < (me->module_core_rx + me->core_size_rx));
1346 +}
1347 +
1348 +static inline int in_core_rw(struct module *me, void *loc)
1349 +{
1350 + return (loc >= me->module_core_rw &&
1351 + loc < (me->module_core_rw + me->core_size_rw));
1352 }
1353
1354 static inline int in_core(struct module *me, void *loc)
1355 {
1356 - return (loc >= me->module_core &&
1357 - loc <= (me->module_core + me->core_size));
1358 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1359 }
1360
1361 static inline int in_local(struct module *me, void *loc)
1362 @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1363 }
1364
1365 /* align things a bit */
1366 - me->core_size = ALIGN(me->core_size, 16);
1367 - me->arch.got_offset = me->core_size;
1368 - me->core_size += gots * sizeof(struct got_entry);
1369 -
1370 - me->core_size = ALIGN(me->core_size, 16);
1371 - me->arch.fdesc_offset = me->core_size;
1372 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1373 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1374 + me->arch.got_offset = me->core_size_rw;
1375 + me->core_size_rw += gots * sizeof(struct got_entry);
1376 +
1377 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1378 + me->arch.fdesc_offset = me->core_size_rw;
1379 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1380
1381 me->arch.got_max = gots;
1382 me->arch.fdesc_max = fdescs;
1383 @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1384
1385 BUG_ON(value == 0);
1386
1387 - got = me->module_core + me->arch.got_offset;
1388 + got = me->module_core_rw + me->arch.got_offset;
1389 for (i = 0; got[i].addr; i++)
1390 if (got[i].addr == value)
1391 goto out;
1392 @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1393 #ifdef CONFIG_64BIT
1394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1395 {
1396 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1397 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1398
1399 if (!value) {
1400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1401 @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1402
1403 /* Create new one */
1404 fdesc->addr = value;
1405 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1406 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1407 return (Elf_Addr)fdesc;
1408 }
1409 #endif /* CONFIG_64BIT */
1410 @@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1411
1412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1413 end = table + sechdrs[me->arch.unwind_section].sh_size;
1414 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1415 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1416
1417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1418 me->arch.unwind_section, table, end, gp);
1419 diff -urNp linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c
1420 --- linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400
1421 +++ linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-08-05 19:44:33.000000000 -0400
1422 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1423 /* At this point: (!vma || addr < vma->vm_end). */
1424 if (TASK_SIZE - len < addr)
1425 return -ENOMEM;
1426 - if (!vma || addr + len <= vma->vm_start)
1427 + if (check_heap_stack_gap(vma, addr, len))
1428 return addr;
1429 addr = vma->vm_end;
1430 }
1431 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1432 /* At this point: (!vma || addr < vma->vm_end). */
1433 if (TASK_SIZE - len < addr)
1434 return -ENOMEM;
1435 - if (!vma || addr + len <= vma->vm_start)
1436 + if (check_heap_stack_gap(vma, addr, len))
1437 return addr;
1438 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1439 if (addr < vma->vm_end) /* handle wraparound */
1440 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1441 if (flags & MAP_FIXED)
1442 return addr;
1443 if (!addr)
1444 - addr = TASK_UNMAPPED_BASE;
1445 + addr = current->mm->mmap_base;
1446
1447 if (filp) {
1448 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1449 diff -urNp linux-2.6.39.4/arch/parisc/kernel/traps.c linux-2.6.39.4/arch/parisc/kernel/traps.c
1450 --- linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
1451 +++ linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
1452 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1453
1454 down_read(&current->mm->mmap_sem);
1455 vma = find_vma(current->mm,regs->iaoq[0]);
1456 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1457 - && (vma->vm_flags & VM_EXEC)) {
1458 -
1459 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1460 fault_address = regs->iaoq[0];
1461 fault_space = regs->iasq[0];
1462
1463 diff -urNp linux-2.6.39.4/arch/parisc/mm/fault.c linux-2.6.39.4/arch/parisc/mm/fault.c
1464 --- linux-2.6.39.4/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1465 +++ linux-2.6.39.4/arch/parisc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1466 @@ -15,6 +15,7 @@
1467 #include <linux/sched.h>
1468 #include <linux/interrupt.h>
1469 #include <linux/module.h>
1470 +#include <linux/unistd.h>
1471
1472 #include <asm/uaccess.h>
1473 #include <asm/traps.h>
1474 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1475 static unsigned long
1476 parisc_acctyp(unsigned long code, unsigned int inst)
1477 {
1478 - if (code == 6 || code == 16)
1479 + if (code == 6 || code == 7 || code == 16)
1480 return VM_EXEC;
1481
1482 switch (inst & 0xf0000000) {
1483 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1484 }
1485 #endif
1486
1487 +#ifdef CONFIG_PAX_PAGEEXEC
1488 +/*
1489 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1490 + *
1491 + * returns 1 when task should be killed
1492 + * 2 when rt_sigreturn trampoline was detected
1493 + * 3 when unpatched PLT trampoline was detected
1494 + */
1495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1496 +{
1497 +
1498 +#ifdef CONFIG_PAX_EMUPLT
1499 + int err;
1500 +
1501 + do { /* PaX: unpatched PLT emulation */
1502 + unsigned int bl, depwi;
1503 +
1504 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1505 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1506 +
1507 + if (err)
1508 + break;
1509 +
1510 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1511 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1512 +
1513 + err = get_user(ldw, (unsigned int *)addr);
1514 + err |= get_user(bv, (unsigned int *)(addr+4));
1515 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1516 +
1517 + if (err)
1518 + break;
1519 +
1520 + if (ldw == 0x0E801096U &&
1521 + bv == 0xEAC0C000U &&
1522 + ldw2 == 0x0E881095U)
1523 + {
1524 + unsigned int resolver, map;
1525 +
1526 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1527 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1528 + if (err)
1529 + break;
1530 +
1531 + regs->gr[20] = instruction_pointer(regs)+8;
1532 + regs->gr[21] = map;
1533 + regs->gr[22] = resolver;
1534 + regs->iaoq[0] = resolver | 3UL;
1535 + regs->iaoq[1] = regs->iaoq[0] + 4;
1536 + return 3;
1537 + }
1538 + }
1539 + } while (0);
1540 +#endif
1541 +
1542 +#ifdef CONFIG_PAX_EMUTRAMP
1543 +
1544 +#ifndef CONFIG_PAX_EMUSIGRT
1545 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1546 + return 1;
1547 +#endif
1548 +
1549 + do { /* PaX: rt_sigreturn emulation */
1550 + unsigned int ldi1, ldi2, bel, nop;
1551 +
1552 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1553 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1554 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1555 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1556 +
1557 + if (err)
1558 + break;
1559 +
1560 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1561 + ldi2 == 0x3414015AU &&
1562 + bel == 0xE4008200U &&
1563 + nop == 0x08000240U)
1564 + {
1565 + regs->gr[25] = (ldi1 & 2) >> 1;
1566 + regs->gr[20] = __NR_rt_sigreturn;
1567 + regs->gr[31] = regs->iaoq[1] + 16;
1568 + regs->sr[0] = regs->iasq[1];
1569 + regs->iaoq[0] = 0x100UL;
1570 + regs->iaoq[1] = regs->iaoq[0] + 4;
1571 + regs->iasq[0] = regs->sr[2];
1572 + regs->iasq[1] = regs->sr[2];
1573 + return 2;
1574 + }
1575 + } while (0);
1576 +#endif
1577 +
1578 + return 1;
1579 +}
1580 +
1581 +void pax_report_insns(void *pc, void *sp)
1582 +{
1583 + unsigned long i;
1584 +
1585 + printk(KERN_ERR "PAX: bytes at PC: ");
1586 + for (i = 0; i < 5; i++) {
1587 + unsigned int c;
1588 + if (get_user(c, (unsigned int *)pc+i))
1589 + printk(KERN_CONT "???????? ");
1590 + else
1591 + printk(KERN_CONT "%08x ", c);
1592 + }
1593 + printk("\n");
1594 +}
1595 +#endif
1596 +
1597 int fixup_exception(struct pt_regs *regs)
1598 {
1599 const struct exception_table_entry *fix;
1600 @@ -192,8 +303,33 @@ good_area:
1601
1602 acc_type = parisc_acctyp(code,regs->iir);
1603
1604 - if ((vma->vm_flags & acc_type) != acc_type)
1605 + if ((vma->vm_flags & acc_type) != acc_type) {
1606 +
1607 +#ifdef CONFIG_PAX_PAGEEXEC
1608 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1609 + (address & ~3UL) == instruction_pointer(regs))
1610 + {
1611 + up_read(&mm->mmap_sem);
1612 + switch (pax_handle_fetch_fault(regs)) {
1613 +
1614 +#ifdef CONFIG_PAX_EMUPLT
1615 + case 3:
1616 + return;
1617 +#endif
1618 +
1619 +#ifdef CONFIG_PAX_EMUTRAMP
1620 + case 2:
1621 + return;
1622 +#endif
1623 +
1624 + }
1625 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1626 + do_group_exit(SIGKILL);
1627 + }
1628 +#endif
1629 +
1630 goto bad_area;
1631 + }
1632
1633 /*
1634 * If for any reason at all we couldn't handle the fault, make
1635 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/elf.h linux-2.6.39.4/arch/powerpc/include/asm/elf.h
1636 --- linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1637 +++ linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1638 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1639 the loader. We need to make sure that it is out of the way of the program
1640 that it will "exec", and that there is sufficient room for the brk. */
1641
1642 -extern unsigned long randomize_et_dyn(unsigned long base);
1643 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1644 +#define ELF_ET_DYN_BASE (0x20000000)
1645 +
1646 +#ifdef CONFIG_PAX_ASLR
1647 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1648 +
1649 +#ifdef __powerpc64__
1650 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1651 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1652 +#else
1653 +#define PAX_DELTA_MMAP_LEN 15
1654 +#define PAX_DELTA_STACK_LEN 15
1655 +#endif
1656 +#endif
1657
1658 /*
1659 * Our registers are always unsigned longs, whether we're a 32 bit
1660 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1661 (0x7ff >> (PAGE_SHIFT - 12)) : \
1662 (0x3ffff >> (PAGE_SHIFT - 12)))
1663
1664 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1665 -#define arch_randomize_brk arch_randomize_brk
1666 -
1667 #endif /* __KERNEL__ */
1668
1669 /*
1670 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h
1671 --- linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
1672 +++ linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
1673 @@ -27,6 +27,7 @@ enum km_type {
1674 KM_PPC_SYNC_PAGE,
1675 KM_PPC_SYNC_ICACHE,
1676 KM_KDB,
1677 + KM_CLEARPAGE,
1678 KM_TYPE_NR
1679 };
1680
1681 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page_64.h linux-2.6.39.4/arch/powerpc/include/asm/page_64.h
1682 --- linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400
1683 +++ linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-08-05 19:44:33.000000000 -0400
1684 @@ -172,15 +172,18 @@ do { \
1685 * stack by default, so in the absence of a PT_GNU_STACK program header
1686 * we turn execute permission off.
1687 */
1688 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1689 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1690 +#define VM_STACK_DEFAULT_FLAGS32 \
1691 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1692 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1693
1694 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1695 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1696
1697 +#ifndef CONFIG_PAX_PAGEEXEC
1698 #define VM_STACK_DEFAULT_FLAGS \
1699 (is_32bit_task() ? \
1700 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1701 +#endif
1702
1703 #include <asm-generic/getorder.h>
1704
1705 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page.h linux-2.6.39.4/arch/powerpc/include/asm/page.h
1706 --- linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1707 +++ linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1708 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1709 * and needs to be executable. This means the whole heap ends
1710 * up being executable.
1711 */
1712 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1713 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1714 +#define VM_DATA_DEFAULT_FLAGS32 \
1715 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1716 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1717
1718 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1719 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1720 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1721 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1722 #endif
1723
1724 +#define ktla_ktva(addr) (addr)
1725 +#define ktva_ktla(addr) (addr)
1726 +
1727 #ifndef __ASSEMBLY__
1728
1729 #undef STRICT_MM_TYPECHECKS
1730 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h
1731 --- linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1732 +++ linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1733 @@ -2,6 +2,7 @@
1734 #define _ASM_POWERPC_PGTABLE_H
1735 #ifdef __KERNEL__
1736
1737 +#include <linux/const.h>
1738 #ifndef __ASSEMBLY__
1739 #include <asm/processor.h> /* For TASK_SIZE */
1740 #include <asm/mmu.h>
1741 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h
1742 --- linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400
1743 +++ linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-05 19:44:33.000000000 -0400
1744 @@ -21,6 +21,7 @@
1745 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1746 #define _PAGE_USER 0x004 /* usermode access allowed */
1747 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1748 +#define _PAGE_EXEC _PAGE_GUARDED
1749 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1750 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1751 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1752 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/reg.h linux-2.6.39.4/arch/powerpc/include/asm/reg.h
1753 --- linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400
1754 +++ linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-08-05 19:44:33.000000000 -0400
1755 @@ -201,6 +201,7 @@
1756 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1757 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1758 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1759 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1760 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1761 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1762 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1763 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/system.h linux-2.6.39.4/arch/powerpc/include/asm/system.h
1764 --- linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1765 +++ linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1766 @@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1767 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1768 #endif
1769
1770 -extern unsigned long arch_align_stack(unsigned long sp);
1771 +#define arch_align_stack(x) ((x) & ~0xfUL)
1772
1773 /* Used in very early kernel initialization. */
1774 extern unsigned long reloc_offset(void);
1775 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h
1776 --- linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
1777 +++ linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
1778 @@ -13,6 +13,8 @@
1779 #define VERIFY_READ 0
1780 #define VERIFY_WRITE 1
1781
1782 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1783 +
1784 /*
1785 * The fs value determines whether argument validity checking should be
1786 * performed or not. If get_fs() == USER_DS, checking is performed, with
1787 @@ -327,52 +329,6 @@ do { \
1788 extern unsigned long __copy_tofrom_user(void __user *to,
1789 const void __user *from, unsigned long size);
1790
1791 -#ifndef __powerpc64__
1792 -
1793 -static inline unsigned long copy_from_user(void *to,
1794 - const void __user *from, unsigned long n)
1795 -{
1796 - unsigned long over;
1797 -
1798 - if (access_ok(VERIFY_READ, from, n))
1799 - return __copy_tofrom_user((__force void __user *)to, from, n);
1800 - if ((unsigned long)from < TASK_SIZE) {
1801 - over = (unsigned long)from + n - TASK_SIZE;
1802 - return __copy_tofrom_user((__force void __user *)to, from,
1803 - n - over) + over;
1804 - }
1805 - return n;
1806 -}
1807 -
1808 -static inline unsigned long copy_to_user(void __user *to,
1809 - const void *from, unsigned long n)
1810 -{
1811 - unsigned long over;
1812 -
1813 - if (access_ok(VERIFY_WRITE, to, n))
1814 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1815 - if ((unsigned long)to < TASK_SIZE) {
1816 - over = (unsigned long)to + n - TASK_SIZE;
1817 - return __copy_tofrom_user(to, (__force void __user *)from,
1818 - n - over) + over;
1819 - }
1820 - return n;
1821 -}
1822 -
1823 -#else /* __powerpc64__ */
1824 -
1825 -#define __copy_in_user(to, from, size) \
1826 - __copy_tofrom_user((to), (from), (size))
1827 -
1828 -extern unsigned long copy_from_user(void *to, const void __user *from,
1829 - unsigned long n);
1830 -extern unsigned long copy_to_user(void __user *to, const void *from,
1831 - unsigned long n);
1832 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1833 - unsigned long n);
1834 -
1835 -#endif /* __powerpc64__ */
1836 -
1837 static inline unsigned long __copy_from_user_inatomic(void *to,
1838 const void __user *from, unsigned long n)
1839 {
1840 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1841 if (ret == 0)
1842 return 0;
1843 }
1844 +
1845 + if (!__builtin_constant_p(n))
1846 + check_object_size(to, n, false);
1847 +
1848 return __copy_tofrom_user((__force void __user *)to, from, n);
1849 }
1850
1851 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1852 if (ret == 0)
1853 return 0;
1854 }
1855 +
1856 + if (!__builtin_constant_p(n))
1857 + check_object_size(from, n, true);
1858 +
1859 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1860 }
1861
1862 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1863 return __copy_to_user_inatomic(to, from, size);
1864 }
1865
1866 +#ifndef __powerpc64__
1867 +
1868 +static inline unsigned long __must_check copy_from_user(void *to,
1869 + const void __user *from, unsigned long n)
1870 +{
1871 + unsigned long over;
1872 +
1873 + if ((long)n < 0)
1874 + return n;
1875 +
1876 + if (access_ok(VERIFY_READ, from, n)) {
1877 + if (!__builtin_constant_p(n))
1878 + check_object_size(to, n, false);
1879 + return __copy_tofrom_user((__force void __user *)to, from, n);
1880 + }
1881 + if ((unsigned long)from < TASK_SIZE) {
1882 + over = (unsigned long)from + n - TASK_SIZE;
1883 + if (!__builtin_constant_p(n - over))
1884 + check_object_size(to, n - over, false);
1885 + return __copy_tofrom_user((__force void __user *)to, from,
1886 + n - over) + over;
1887 + }
1888 + return n;
1889 +}
1890 +
1891 +static inline unsigned long __must_check copy_to_user(void __user *to,
1892 + const void *from, unsigned long n)
1893 +{
1894 + unsigned long over;
1895 +
1896 + if ((long)n < 0)
1897 + return n;
1898 +
1899 + if (access_ok(VERIFY_WRITE, to, n)) {
1900 + if (!__builtin_constant_p(n))
1901 + check_object_size(from, n, true);
1902 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1903 + }
1904 + if ((unsigned long)to < TASK_SIZE) {
1905 + over = (unsigned long)to + n - TASK_SIZE;
1906 + if (!__builtin_constant_p(n))
1907 + check_object_size(from, n - over, true);
1908 + return __copy_tofrom_user(to, (__force void __user *)from,
1909 + n - over) + over;
1910 + }
1911 + return n;
1912 +}
1913 +
1914 +#else /* __powerpc64__ */
1915 +
1916 +#define __copy_in_user(to, from, size) \
1917 + __copy_tofrom_user((to), (from), (size))
1918 +
1919 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1920 +{
1921 + if ((long)n < 0 || n > INT_MAX)
1922 + return n;
1923 +
1924 + if (!__builtin_constant_p(n))
1925 + check_object_size(to, n, false);
1926 +
1927 + if (likely(access_ok(VERIFY_READ, from, n)))
1928 + n = __copy_from_user(to, from, n);
1929 + else
1930 + memset(to, 0, n);
1931 + return n;
1932 +}
1933 +
1934 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1935 +{
1936 + if ((long)n < 0 || n > INT_MAX)
1937 + return n;
1938 +
1939 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1940 + if (!__builtin_constant_p(n))
1941 + check_object_size(from, n, true);
1942 + n = __copy_to_user(to, from, n);
1943 + }
1944 + return n;
1945 +}
1946 +
1947 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1948 + unsigned long n);
1949 +
1950 +#endif /* __powerpc64__ */
1951 +
1952 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1953
1954 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1955 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S
1956 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400
1957 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-05 19:44:33.000000000 -0400
1958 @@ -495,6 +495,7 @@ storage_fault_common:
1959 std r14,_DAR(r1)
1960 std r15,_DSISR(r1)
1961 addi r3,r1,STACK_FRAME_OVERHEAD
1962 + bl .save_nvgprs
1963 mr r4,r14
1964 mr r5,r15
1965 ld r14,PACA_EXGEN+EX_R14(r13)
1966 @@ -504,8 +505,7 @@ storage_fault_common:
1967 cmpdi r3,0
1968 bne- 1f
1969 b .ret_from_except_lite
1970 -1: bl .save_nvgprs
1971 - mr r5,r3
1972 +1: mr r5,r3
1973 addi r3,r1,STACK_FRAME_OVERHEAD
1974 ld r4,_DAR(r1)
1975 bl .bad_page_fault
1976 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S
1977 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400
1978 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-05 19:44:33.000000000 -0400
1979 @@ -848,10 +848,10 @@ handle_page_fault:
1980 11: ld r4,_DAR(r1)
1981 ld r5,_DSISR(r1)
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 + bl .save_nvgprs
1984 bl .do_page_fault
1985 cmpdi r3,0
1986 beq+ 13f
1987 - bl .save_nvgprs
1988 mr r5,r3
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990 lwz r4,_DAR(r1)
1991 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module_32.c linux-2.6.39.4/arch/powerpc/kernel/module_32.c
1992 --- linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400
1993 +++ linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-08-05 19:44:33.000000000 -0400
1994 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
1995 me->arch.core_plt_section = i;
1996 }
1997 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
1998 - printk("Module doesn't contain .plt or .init.plt sections.\n");
1999 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2000 return -ENOEXEC;
2001 }
2002
2003 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2004
2005 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2006 /* Init, or core PLT? */
2007 - if (location >= mod->module_core
2008 - && location < mod->module_core + mod->core_size)
2009 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2010 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2011 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2012 - else
2013 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2014 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2015 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2016 + else {
2017 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2018 + return ~0UL;
2019 + }
2020
2021 /* Find this entry, or if that fails, the next avail. entry */
2022 while (entry->jump[0]) {
2023 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module.c linux-2.6.39.4/arch/powerpc/kernel/module.c
2024 --- linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2025 +++ linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2026 @@ -31,11 +31,24 @@
2027
2028 LIST_HEAD(module_bug_list);
2029
2030 +#ifdef CONFIG_PAX_KERNEXEC
2031 void *module_alloc(unsigned long size)
2032 {
2033 if (size == 0)
2034 return NULL;
2035
2036 + return vmalloc(size);
2037 +}
2038 +
2039 +void *module_alloc_exec(unsigned long size)
2040 +#else
2041 +void *module_alloc(unsigned long size)
2042 +#endif
2043 +
2044 +{
2045 + if (size == 0)
2046 + return NULL;
2047 +
2048 return vmalloc_exec(size);
2049 }
2050
2051 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2052 vfree(module_region);
2053 }
2054
2055 +#ifdef CONFIG_PAX_KERNEXEC
2056 +void module_free_exec(struct module *mod, void *module_region)
2057 +{
2058 + module_free(mod, module_region);
2059 +}
2060 +#endif
2061 +
2062 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2063 const Elf_Shdr *sechdrs,
2064 const char *name)
2065 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/process.c linux-2.6.39.4/arch/powerpc/kernel/process.c
2066 --- linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2067 +++ linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2068 @@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs)
2069 * Lookup NIP late so we have the best change of getting the
2070 * above info out without failing
2071 */
2072 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2073 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2074 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2075 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2076 #endif
2077 show_stack(current, (unsigned long *) regs->gpr[1]);
2078 if (!user_mode(regs))
2079 @@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk,
2080 newsp = stack[0];
2081 ip = stack[STACK_FRAME_LR_SAVE];
2082 if (!firstframe || ip != lr) {
2083 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2084 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2085 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2086 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2087 - printk(" (%pS)",
2088 + printk(" (%pA)",
2089 (void *)current->ret_stack[curr_frame].ret);
2090 curr_frame--;
2091 }
2092 @@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk,
2093 struct pt_regs *regs = (struct pt_regs *)
2094 (sp + STACK_FRAME_OVERHEAD);
2095 lr = regs->link;
2096 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2097 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2098 regs->trap, (void *)regs->nip, (void *)lr);
2099 firstframe = 1;
2100 }
2101 @@ -1244,58 +1244,3 @@ void thread_info_cache_init(void)
2102 }
2103
2104 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2105 -
2106 -unsigned long arch_align_stack(unsigned long sp)
2107 -{
2108 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2109 - sp -= get_random_int() & ~PAGE_MASK;
2110 - return sp & ~0xf;
2111 -}
2112 -
2113 -static inline unsigned long brk_rnd(void)
2114 -{
2115 - unsigned long rnd = 0;
2116 -
2117 - /* 8MB for 32bit, 1GB for 64bit */
2118 - if (is_32bit_task())
2119 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2120 - else
2121 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2122 -
2123 - return rnd << PAGE_SHIFT;
2124 -}
2125 -
2126 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2127 -{
2128 - unsigned long base = mm->brk;
2129 - unsigned long ret;
2130 -
2131 -#ifdef CONFIG_PPC_STD_MMU_64
2132 - /*
2133 - * If we are using 1TB segments and we are allowed to randomise
2134 - * the heap, we can put it above 1TB so it is backed by a 1TB
2135 - * segment. Otherwise the heap will be in the bottom 1TB
2136 - * which always uses 256MB segments and this may result in a
2137 - * performance penalty.
2138 - */
2139 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2140 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2141 -#endif
2142 -
2143 - ret = PAGE_ALIGN(base + brk_rnd());
2144 -
2145 - if (ret < mm->brk)
2146 - return mm->brk;
2147 -
2148 - return ret;
2149 -}
2150 -
2151 -unsigned long randomize_et_dyn(unsigned long base)
2152 -{
2153 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2154 -
2155 - if (ret < base)
2156 - return base;
2157 -
2158 - return ret;
2159 -}
2160 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_32.c linux-2.6.39.4/arch/powerpc/kernel/signal_32.c
2161 --- linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400
2162 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-08-05 19:44:33.000000000 -0400
2163 @@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
2164 /* Save user registers on the stack */
2165 frame = &rt_sf->uc.uc_mcontext;
2166 addr = frame;
2167 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2168 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2169 if (save_user_regs(regs, frame, 0, 1))
2170 goto badframe;
2171 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2172 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_64.c linux-2.6.39.4/arch/powerpc/kernel/signal_64.c
2173 --- linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400
2174 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-08-05 19:44:33.000000000 -0400
2175 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2176 current->thread.fpscr.val = 0;
2177
2178 /* Set up to return from userspace. */
2179 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2180 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2181 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2182 } else {
2183 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2184 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/traps.c linux-2.6.39.4/arch/powerpc/kernel/traps.c
2185 --- linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
2186 +++ linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
2187 @@ -96,6 +96,8 @@ static void pmac_backlight_unblank(void)
2188 static inline void pmac_backlight_unblank(void) { }
2189 #endif
2190
2191 +extern void gr_handle_kernel_exploit(void);
2192 +
2193 int die(const char *str, struct pt_regs *regs, long err)
2194 {
2195 static struct {
2196 @@ -170,6 +172,8 @@ int die(const char *str, struct pt_regs
2197 if (panic_on_oops)
2198 panic("Fatal exception");
2199
2200 + gr_handle_kernel_exploit();
2201 +
2202 oops_exit();
2203 do_exit(err);
2204
2205 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/vdso.c linux-2.6.39.4/arch/powerpc/kernel/vdso.c
2206 --- linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400
2207 +++ linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-08-05 19:44:33.000000000 -0400
2208 @@ -36,6 +36,7 @@
2209 #include <asm/firmware.h>
2210 #include <asm/vdso.h>
2211 #include <asm/vdso_datapage.h>
2212 +#include <asm/mman.h>
2213
2214 #include "setup.h"
2215
2216 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2217 vdso_base = VDSO32_MBASE;
2218 #endif
2219
2220 - current->mm->context.vdso_base = 0;
2221 + current->mm->context.vdso_base = ~0UL;
2222
2223 /* vDSO has a problem and was disabled, just don't "enable" it for the
2224 * process
2225 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = get_unmapped_area(NULL, vdso_base,
2227 (vdso_pages << PAGE_SHIFT) +
2228 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2229 - 0, 0);
2230 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2231 if (IS_ERR_VALUE(vdso_base)) {
2232 rc = vdso_base;
2233 goto fail_mmapsem;
2234 diff -urNp linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c
2235 --- linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
2236 +++ linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-08-05 19:44:33.000000000 -0400
2237 @@ -9,22 +9,6 @@
2238 #include <linux/module.h>
2239 #include <asm/uaccess.h>
2240
2241 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2242 -{
2243 - if (likely(access_ok(VERIFY_READ, from, n)))
2244 - n = __copy_from_user(to, from, n);
2245 - else
2246 - memset(to, 0, n);
2247 - return n;
2248 -}
2249 -
2250 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2253 - n = __copy_to_user(to, from, n);
2254 - return n;
2255 -}
2256 -
2257 unsigned long copy_in_user(void __user *to, const void __user *from,
2258 unsigned long n)
2259 {
2260 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2261 return n;
2262 }
2263
2264 -EXPORT_SYMBOL(copy_from_user);
2265 -EXPORT_SYMBOL(copy_to_user);
2266 EXPORT_SYMBOL(copy_in_user);
2267
2268 diff -urNp linux-2.6.39.4/arch/powerpc/mm/fault.c linux-2.6.39.4/arch/powerpc/mm/fault.c
2269 --- linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
2270 +++ linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
2271 @@ -31,6 +31,10 @@
2272 #include <linux/kdebug.h>
2273 #include <linux/perf_event.h>
2274 #include <linux/magic.h>
2275 +#include <linux/slab.h>
2276 +#include <linux/pagemap.h>
2277 +#include <linux/compiler.h>
2278 +#include <linux/unistd.h>
2279
2280 #include <asm/firmware.h>
2281 #include <asm/page.h>
2282 @@ -42,6 +46,7 @@
2283 #include <asm/tlbflush.h>
2284 #include <asm/siginfo.h>
2285 #include <mm/mmu_decl.h>
2286 +#include <asm/ptrace.h>
2287
2288 #ifdef CONFIG_KPROBES
2289 static inline int notify_page_fault(struct pt_regs *regs)
2290 @@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
2291 }
2292 #endif
2293
2294 +#ifdef CONFIG_PAX_PAGEEXEC
2295 +/*
2296 + * PaX: decide what to do with offenders (regs->nip = fault address)
2297 + *
2298 + * returns 1 when task should be killed
2299 + */
2300 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2301 +{
2302 + return 1;
2303 +}
2304 +
2305 +void pax_report_insns(void *pc, void *sp)
2306 +{
2307 + unsigned long i;
2308 +
2309 + printk(KERN_ERR "PAX: bytes at PC: ");
2310 + for (i = 0; i < 5; i++) {
2311 + unsigned int c;
2312 + if (get_user(c, (unsigned int __user *)pc+i))
2313 + printk(KERN_CONT "???????? ");
2314 + else
2315 + printk(KERN_CONT "%08x ", c);
2316 + }
2317 + printk("\n");
2318 +}
2319 +#endif
2320 +
2321 /*
2322 * Check whether the instruction at regs->nip is a store using
2323 * an update addressing form which will update r1.
2324 @@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
2325 * indicate errors in DSISR but can validly be set in SRR1.
2326 */
2327 if (trap == 0x400)
2328 - error_code &= 0x48200000;
2329 + error_code &= 0x58200000;
2330 else
2331 is_write = error_code & DSISR_ISSTORE;
2332 #else
2333 @@ -258,7 +290,7 @@ good_area:
2334 * "undefined". Of those that can be set, this is the only
2335 * one which seems bad.
2336 */
2337 - if (error_code & 0x10000000)
2338 + if (error_code & DSISR_GUARDED)
2339 /* Guarded storage error. */
2340 goto bad_area;
2341 #endif /* CONFIG_8xx */
2342 @@ -273,7 +305,7 @@ good_area:
2343 * processors use the same I/D cache coherency mechanism
2344 * as embedded.
2345 */
2346 - if (error_code & DSISR_PROTFAULT)
2347 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2348 goto bad_area;
2349 #endif /* CONFIG_PPC_STD_MMU */
2350
2351 @@ -342,6 +374,23 @@ bad_area:
2352 bad_area_nosemaphore:
2353 /* User mode accesses cause a SIGSEGV */
2354 if (user_mode(regs)) {
2355 +
2356 +#ifdef CONFIG_PAX_PAGEEXEC
2357 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2358 +#ifdef CONFIG_PPC_STD_MMU
2359 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2360 +#else
2361 + if (is_exec && regs->nip == address) {
2362 +#endif
2363 + switch (pax_handle_fetch_fault(regs)) {
2364 + }
2365 +
2366 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2367 + do_group_exit(SIGKILL);
2368 + }
2369 + }
2370 +#endif
2371 +
2372 _exception(SIGSEGV, regs, code, address);
2373 return 0;
2374 }
2375 diff -urNp linux-2.6.39.4/arch/powerpc/mm/mmap_64.c linux-2.6.39.4/arch/powerpc/mm/mmap_64.c
2376 --- linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400
2377 +++ linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-08-05 19:44:33.000000000 -0400
2378 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2379 */
2380 if (mmap_is_legacy()) {
2381 mm->mmap_base = TASK_UNMAPPED_BASE;
2382 +
2383 +#ifdef CONFIG_PAX_RANDMMAP
2384 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2385 + mm->mmap_base += mm->delta_mmap;
2386 +#endif
2387 +
2388 mm->get_unmapped_area = arch_get_unmapped_area;
2389 mm->unmap_area = arch_unmap_area;
2390 } else {
2391 mm->mmap_base = mmap_base();
2392 +
2393 +#ifdef CONFIG_PAX_RANDMMAP
2394 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2395 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2396 +#endif
2397 +
2398 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2399 mm->unmap_area = arch_unmap_area_topdown;
2400 }
2401 diff -urNp linux-2.6.39.4/arch/powerpc/mm/slice.c linux-2.6.39.4/arch/powerpc/mm/slice.c
2402 --- linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400
2403 +++ linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-08-05 19:44:33.000000000 -0400
2404 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2405 if ((mm->task_size - len) < addr)
2406 return 0;
2407 vma = find_vma(mm, addr);
2408 - return (!vma || (addr + len) <= vma->vm_start);
2409 + return check_heap_stack_gap(vma, addr, len);
2410 }
2411
2412 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2413 @@ -256,7 +256,7 @@ full_search:
2414 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2415 continue;
2416 }
2417 - if (!vma || addr + len <= vma->vm_start) {
2418 + if (check_heap_stack_gap(vma, addr, len)) {
2419 /*
2420 * Remember the place where we stopped the search:
2421 */
2422 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2423 }
2424 }
2425
2426 - addr = mm->mmap_base;
2427 - while (addr > len) {
2428 + if (mm->mmap_base < len)
2429 + addr = -ENOMEM;
2430 + else
2431 + addr = mm->mmap_base - len;
2432 +
2433 + while (!IS_ERR_VALUE(addr)) {
2434 /* Go down by chunk size */
2435 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2436 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2437
2438 /* Check for hit with different page size */
2439 mask = slice_range_to_mask(addr, len);
2440 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2441 * return with success:
2442 */
2443 vma = find_vma(mm, addr);
2444 - if (!vma || (addr + len) <= vma->vm_start) {
2445 + if (check_heap_stack_gap(vma, addr, len)) {
2446 /* remember the address as a hint for next time */
2447 if (use_cache)
2448 mm->free_area_cache = addr;
2449 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2450 mm->cached_hole_size = vma->vm_start - addr;
2451
2452 /* try just below the current vma->vm_start */
2453 - addr = vma->vm_start;
2454 + addr = skip_heap_stack_gap(vma, len);
2455 }
2456
2457 /*
2458 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2459 if (fixed && addr > (mm->task_size - len))
2460 return -EINVAL;
2461
2462 +#ifdef CONFIG_PAX_RANDMMAP
2463 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2464 + addr = 0;
2465 +#endif
2466 +
2467 /* If hint, make sure it matches our alignment restrictions */
2468 if (!fixed && addr) {
2469 addr = _ALIGN_UP(addr, 1ul << pshift);
2470 diff -urNp linux-2.6.39.4/arch/s390/include/asm/elf.h linux-2.6.39.4/arch/s390/include/asm/elf.h
2471 --- linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
2472 +++ linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
2473 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2474 the loader. We need to make sure that it is out of the way of the program
2475 that it will "exec", and that there is sufficient room for the brk. */
2476
2477 -extern unsigned long randomize_et_dyn(unsigned long base);
2478 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2479 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2480 +
2481 +#ifdef CONFIG_PAX_ASLR
2482 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2483 +
2484 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2485 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2486 +#endif
2487
2488 /* This yields a mask that user programs can use to figure out what
2489 instruction set this CPU supports. */
2490 @@ -222,7 +228,4 @@ struct linux_binprm;
2491 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2492 int arch_setup_additional_pages(struct linux_binprm *, int);
2493
2494 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2495 -#define arch_randomize_brk arch_randomize_brk
2496 -
2497 #endif
2498 diff -urNp linux-2.6.39.4/arch/s390/include/asm/system.h linux-2.6.39.4/arch/s390/include/asm/system.h
2499 --- linux-2.6.39.4/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2500 +++ linux-2.6.39.4/arch/s390/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2501 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2502 extern void (*_machine_halt)(void);
2503 extern void (*_machine_power_off)(void);
2504
2505 -extern unsigned long arch_align_stack(unsigned long sp);
2506 +#define arch_align_stack(x) ((x) & ~0xfUL)
2507
2508 static inline int tprot(unsigned long addr)
2509 {
2510 diff -urNp linux-2.6.39.4/arch/s390/include/asm/uaccess.h linux-2.6.39.4/arch/s390/include/asm/uaccess.h
2511 --- linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
2512 +++ linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
2513 @@ -234,6 +234,10 @@ static inline unsigned long __must_check
2514 copy_to_user(void __user *to, const void *from, unsigned long n)
2515 {
2516 might_fault();
2517 +
2518 + if ((long)n < 0)
2519 + return n;
2520 +
2521 if (access_ok(VERIFY_WRITE, to, n))
2522 n = __copy_to_user(to, from, n);
2523 return n;
2524 @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
2525 static inline unsigned long __must_check
2526 __copy_from_user(void *to, const void __user *from, unsigned long n)
2527 {
2528 + if ((long)n < 0)
2529 + return n;
2530 +
2531 if (__builtin_constant_p(n) && (n <= 256))
2532 return uaccess.copy_from_user_small(n, from, to);
2533 else
2534 @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
2535 unsigned int sz = __compiletime_object_size(to);
2536
2537 might_fault();
2538 +
2539 + if ((long)n < 0)
2540 + return n;
2541 +
2542 if (unlikely(sz != -1 && sz < n)) {
2543 copy_from_user_overflow();
2544 return n;
2545 diff -urNp linux-2.6.39.4/arch/s390/Kconfig linux-2.6.39.4/arch/s390/Kconfig
2546 --- linux-2.6.39.4/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400
2547 +++ linux-2.6.39.4/arch/s390/Kconfig 2011-08-05 19:44:33.000000000 -0400
2548 @@ -234,11 +234,9 @@ config S390_EXEC_PROTECT
2549 prompt "Data execute protection"
2550 help
2551 This option allows to enable a buffer overflow protection for user
2552 - space programs and it also selects the addressing mode option above.
2553 - The kernel parameter noexec=on will enable this feature and also
2554 - switch the addressing modes, default is disabled. Enabling this (via
2555 - kernel parameter) on machines earlier than IBM System z9 this will
2556 - reduce system performance.
2557 + space programs.
2558 + Enabling this (via kernel parameter) on machines earlier than IBM
2559 + System z9 this will reduce system performance.
2560
2561 comment "Code generation options"
2562
2563 diff -urNp linux-2.6.39.4/arch/s390/kernel/module.c linux-2.6.39.4/arch/s390/kernel/module.c
2564 --- linux-2.6.39.4/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2565 +++ linux-2.6.39.4/arch/s390/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2566 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2567
2568 /* Increase core size by size of got & plt and set start
2569 offsets for got and plt. */
2570 - me->core_size = ALIGN(me->core_size, 4);
2571 - me->arch.got_offset = me->core_size;
2572 - me->core_size += me->arch.got_size;
2573 - me->arch.plt_offset = me->core_size;
2574 - me->core_size += me->arch.plt_size;
2575 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2576 + me->arch.got_offset = me->core_size_rw;
2577 + me->core_size_rw += me->arch.got_size;
2578 + me->arch.plt_offset = me->core_size_rx;
2579 + me->core_size_rx += me->arch.plt_size;
2580 return 0;
2581 }
2582
2583 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 if (info->got_initialized == 0) {
2585 Elf_Addr *gotent;
2586
2587 - gotent = me->module_core + me->arch.got_offset +
2588 + gotent = me->module_core_rw + me->arch.got_offset +
2589 info->got_offset;
2590 *gotent = val;
2591 info->got_initialized = 1;
2592 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 else if (r_type == R_390_GOTENT ||
2594 r_type == R_390_GOTPLTENT)
2595 *(unsigned int *) loc =
2596 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2597 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2598 else if (r_type == R_390_GOT64 ||
2599 r_type == R_390_GOTPLT64)
2600 *(unsigned long *) loc = val;
2601 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2603 if (info->plt_initialized == 0) {
2604 unsigned int *ip;
2605 - ip = me->module_core + me->arch.plt_offset +
2606 + ip = me->module_core_rx + me->arch.plt_offset +
2607 info->plt_offset;
2608 #ifndef CONFIG_64BIT
2609 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2610 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 val - loc + 0xffffUL < 0x1ffffeUL) ||
2612 (r_type == R_390_PLT32DBL &&
2613 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2614 - val = (Elf_Addr) me->module_core +
2615 + val = (Elf_Addr) me->module_core_rx +
2616 me->arch.plt_offset +
2617 info->plt_offset;
2618 val += rela->r_addend - loc;
2619 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2621 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2622 val = val + rela->r_addend -
2623 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2624 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2625 if (r_type == R_390_GOTOFF16)
2626 *(unsigned short *) loc = val;
2627 else if (r_type == R_390_GOTOFF32)
2628 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2629 break;
2630 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2631 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2632 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2633 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2634 rela->r_addend - loc;
2635 if (r_type == R_390_GOTPC)
2636 *(unsigned int *) loc = val;
2637 diff -urNp linux-2.6.39.4/arch/s390/kernel/process.c linux-2.6.39.4/arch/s390/kernel/process.c
2638 --- linux-2.6.39.4/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2639 +++ linux-2.6.39.4/arch/s390/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2640 @@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru
2641 }
2642 return 0;
2643 }
2644 -
2645 -unsigned long arch_align_stack(unsigned long sp)
2646 -{
2647 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2648 - sp -= get_random_int() & ~PAGE_MASK;
2649 - return sp & ~0xf;
2650 -}
2651 -
2652 -static inline unsigned long brk_rnd(void)
2653 -{
2654 - /* 8MB for 32bit, 1GB for 64bit */
2655 - if (is_32bit_task())
2656 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2657 - else
2658 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2659 -}
2660 -
2661 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2664 -
2665 - if (ret < mm->brk)
2666 - return mm->brk;
2667 - return ret;
2668 -}
2669 -
2670 -unsigned long randomize_et_dyn(unsigned long base)
2671 -{
2672 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2673 -
2674 - if (!(current->flags & PF_RANDOMIZE))
2675 - return base;
2676 - if (ret < base)
2677 - return base;
2678 - return ret;
2679 -}
2680 diff -urNp linux-2.6.39.4/arch/s390/kernel/setup.c linux-2.6.39.4/arch/s390/kernel/setup.c
2681 --- linux-2.6.39.4/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400
2682 +++ linux-2.6.39.4/arch/s390/kernel/setup.c 2011-08-05 19:44:33.000000000 -0400
2683 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2684 }
2685 early_param("mem", early_parse_mem);
2686
2687 -unsigned int user_mode = HOME_SPACE_MODE;
2688 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2689 EXPORT_SYMBOL_GPL(user_mode);
2690
2691 static int set_amode_and_uaccess(unsigned long user_amode,
2692 @@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne
2693 }
2694 }
2695
2696 -/*
2697 - * Switch kernel/user addressing modes?
2698 - */
2699 -static int __init early_parse_switch_amode(char *p)
2700 -{
2701 - if (user_mode != SECONDARY_SPACE_MODE)
2702 - user_mode = PRIMARY_SPACE_MODE;
2703 - return 0;
2704 -}
2705 -early_param("switch_amode", early_parse_switch_amode);
2706 -
2707 static int __init early_parse_user_mode(char *p)
2708 {
2709 if (p && strcmp(p, "primary") == 0)
2710 @@ -327,20 +316,6 @@ static int __init early_parse_user_mode(
2711 }
2712 early_param("user_mode", early_parse_user_mode);
2713
2714 -#ifdef CONFIG_S390_EXEC_PROTECT
2715 -/*
2716 - * Enable execute protection?
2717 - */
2718 -static int __init early_parse_noexec(char *p)
2719 -{
2720 - if (!strncmp(p, "off", 3))
2721 - return 0;
2722 - user_mode = SECONDARY_SPACE_MODE;
2723 - return 0;
2724 -}
2725 -early_param("noexec", early_parse_noexec);
2726 -#endif /* CONFIG_S390_EXEC_PROTECT */
2727 -
2728 static void setup_addressing_mode(void)
2729 {
2730 if (user_mode == SECONDARY_SPACE_MODE) {
2731 diff -urNp linux-2.6.39.4/arch/s390/mm/mmap.c linux-2.6.39.4/arch/s390/mm/mmap.c
2732 --- linux-2.6.39.4/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2733 +++ linux-2.6.39.4/arch/s390/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2734 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2735 */
2736 if (mmap_is_legacy()) {
2737 mm->mmap_base = TASK_UNMAPPED_BASE;
2738 +
2739 +#ifdef CONFIG_PAX_RANDMMAP
2740 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2741 + mm->mmap_base += mm->delta_mmap;
2742 +#endif
2743 +
2744 mm->get_unmapped_area = arch_get_unmapped_area;
2745 mm->unmap_area = arch_unmap_area;
2746 } else {
2747 mm->mmap_base = mmap_base();
2748 +
2749 +#ifdef CONFIG_PAX_RANDMMAP
2750 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2751 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2752 +#endif
2753 +
2754 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2755 mm->unmap_area = arch_unmap_area_topdown;
2756 }
2757 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2758 */
2759 if (mmap_is_legacy()) {
2760 mm->mmap_base = TASK_UNMAPPED_BASE;
2761 +
2762 +#ifdef CONFIG_PAX_RANDMMAP
2763 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2764 + mm->mmap_base += mm->delta_mmap;
2765 +#endif
2766 +
2767 mm->get_unmapped_area = s390_get_unmapped_area;
2768 mm->unmap_area = arch_unmap_area;
2769 } else {
2770 mm->mmap_base = mmap_base();
2771 +
2772 +#ifdef CONFIG_PAX_RANDMMAP
2773 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2774 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2775 +#endif
2776 +
2777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2778 mm->unmap_area = arch_unmap_area_topdown;
2779 }
2780 diff -urNp linux-2.6.39.4/arch/score/include/asm/system.h linux-2.6.39.4/arch/score/include/asm/system.h
2781 --- linux-2.6.39.4/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2782 +++ linux-2.6.39.4/arch/score/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2783 @@ -17,7 +17,7 @@ do { \
2784 #define finish_arch_switch(prev) do {} while (0)
2785
2786 typedef void (*vi_handler_t)(void);
2787 -extern unsigned long arch_align_stack(unsigned long sp);
2788 +#define arch_align_stack(x) (x)
2789
2790 #define mb() barrier()
2791 #define rmb() barrier()
2792 diff -urNp linux-2.6.39.4/arch/score/kernel/process.c linux-2.6.39.4/arch/score/kernel/process.c
2793 --- linux-2.6.39.4/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2794 +++ linux-2.6.39.4/arch/score/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2795 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2796
2797 return task_pt_regs(task)->cp0_epc;
2798 }
2799 -
2800 -unsigned long arch_align_stack(unsigned long sp)
2801 -{
2802 - return sp;
2803 -}
2804 diff -urNp linux-2.6.39.4/arch/sh/mm/mmap.c linux-2.6.39.4/arch/sh/mm/mmap.c
2805 --- linux-2.6.39.4/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2806 +++ linux-2.6.39.4/arch/sh/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2807 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2808 addr = PAGE_ALIGN(addr);
2809
2810 vma = find_vma(mm, addr);
2811 - if (TASK_SIZE - len >= addr &&
2812 - (!vma || addr + len <= vma->vm_start))
2813 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2814 return addr;
2815 }
2816
2817 @@ -106,7 +105,7 @@ full_search:
2818 }
2819 return -ENOMEM;
2820 }
2821 - if (likely(!vma || addr + len <= vma->vm_start)) {
2822 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2823 /*
2824 * Remember the place where we stopped the search:
2825 */
2826 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2827 addr = PAGE_ALIGN(addr);
2828
2829 vma = find_vma(mm, addr);
2830 - if (TASK_SIZE - len >= addr &&
2831 - (!vma || addr + len <= vma->vm_start))
2832 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2833 return addr;
2834 }
2835
2836 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2837 /* make sure it can fit in the remaining address space */
2838 if (likely(addr > len)) {
2839 vma = find_vma(mm, addr-len);
2840 - if (!vma || addr <= vma->vm_start) {
2841 + if (check_heap_stack_gap(vma, addr - len, len)) {
2842 /* remember the address as a hint for next time */
2843 return (mm->free_area_cache = addr-len);
2844 }
2845 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2846 if (unlikely(mm->mmap_base < len))
2847 goto bottomup;
2848
2849 - addr = mm->mmap_base-len;
2850 - if (do_colour_align)
2851 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2852 + addr = mm->mmap_base - len;
2853
2854 do {
2855 + if (do_colour_align)
2856 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2857 /*
2858 * Lookup failure means no vma is above this address,
2859 * else if new region fits below vma->vm_start,
2860 * return with success:
2861 */
2862 vma = find_vma(mm, addr);
2863 - if (likely(!vma || addr+len <= vma->vm_start)) {
2864 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2865 /* remember the address as a hint for next time */
2866 return (mm->free_area_cache = addr);
2867 }
2868 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2869 mm->cached_hole_size = vma->vm_start - addr;
2870
2871 /* try just below the current vma->vm_start */
2872 - addr = vma->vm_start-len;
2873 - if (do_colour_align)
2874 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2875 - } while (likely(len < vma->vm_start));
2876 + addr = skip_heap_stack_gap(vma, len);
2877 + } while (!IS_ERR_VALUE(addr));
2878
2879 bottomup:
2880 /*
2881 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h
2882 --- linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400
2883 +++ linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-08-05 20:34:06.000000000 -0400
2884 @@ -14,18 +14,40 @@
2885 #define ATOMIC64_INIT(i) { (i) }
2886
2887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2888 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2889 +{
2890 + return v->counter;
2891 +}
2892 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2893 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2894 +{
2895 + return v->counter;
2896 +}
2897
2898 #define atomic_set(v, i) (((v)->counter) = i)
2899 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2900 +{
2901 + v->counter = i;
2902 +}
2903 #define atomic64_set(v, i) (((v)->counter) = i)
2904 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2905 +{
2906 + v->counter = i;
2907 +}
2908
2909 extern void atomic_add(int, atomic_t *);
2910 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2911 extern void atomic64_add(long, atomic64_t *);
2912 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2913 extern void atomic_sub(int, atomic_t *);
2914 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2915 extern void atomic64_sub(long, atomic64_t *);
2916 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2917
2918 extern int atomic_add_ret(int, atomic_t *);
2919 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2920 extern long atomic64_add_ret(long, atomic64_t *);
2921 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2922 extern int atomic_sub_ret(int, atomic_t *);
2923 extern long atomic64_sub_ret(long, atomic64_t *);
2924
2925 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2926 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2927
2928 #define atomic_inc_return(v) atomic_add_ret(1, v)
2929 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2930 +{
2931 + return atomic_add_ret_unchecked(1, v);
2932 +}
2933 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2934 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2935 +{
2936 + return atomic64_add_ret_unchecked(1, v);
2937 +}
2938
2939 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2940 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2941
2942 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2943 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2944 +{
2945 + return atomic_add_ret_unchecked(i, v);
2946 +}
2947 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2948 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2949 +{
2950 + return atomic64_add_ret_unchecked(i, v);
2951 +}
2952
2953 /*
2954 * atomic_inc_and_test - increment and test
2955 @@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
2956 * other cases.
2957 */
2958 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2959 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
2960 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2961
2962 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2963 @@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
2964 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2965
2966 #define atomic_inc(v) atomic_add(1, v)
2967 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2968 +{
2969 + atomic_add_unchecked(1, v);
2970 +}
2971 #define atomic64_inc(v) atomic64_add(1, v)
2972 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2973 +{
2974 + atomic64_add_unchecked(1, v);
2975 +}
2976
2977 #define atomic_dec(v) atomic_sub(1, v)
2978 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2979 +{
2980 + atomic_sub_unchecked(1, v);
2981 +}
2982 #define atomic64_dec(v) atomic64_sub(1, v)
2983 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2984 +{
2985 + atomic64_sub_unchecked(1, v);
2986 +}
2987
2988 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2989 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2990
2991 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2992 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2993 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2994 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
2995
2996 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2997 {
2998 - int c, old;
2999 + int c, old, new;
3000 c = atomic_read(v);
3001 for (;;) {
3002 - if (unlikely(c == (u)))
3003 + if (unlikely(c == u))
3004 break;
3005 - old = atomic_cmpxchg((v), c, c + (a));
3006 +
3007 + asm volatile("addcc %2, %0, %0\n"
3008 +
3009 +#ifdef CONFIG_PAX_REFCOUNT
3010 + "tvs %%icc, 6\n"
3011 +#endif
3012 +
3013 + : "=r" (new)
3014 + : "0" (c), "ir" (a)
3015 + : "cc");
3016 +
3017 + old = atomic_cmpxchg(v, c, new);
3018 if (likely(old == c))
3019 break;
3020 c = old;
3021 }
3022 - return c != (u);
3023 + return c != u;
3024 }
3025
3026 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3027 @@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3028
3029 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3030 {
3031 - long c, old;
3032 + long c, old, new;
3033 c = atomic64_read(v);
3034 for (;;) {
3035 - if (unlikely(c == (u)))
3036 + if (unlikely(c == u))
3037 break;
3038 - old = atomic64_cmpxchg((v), c, c + (a));
3039 +
3040 + asm volatile("addcc %2, %0, %0\n"
3041 +
3042 +#ifdef CONFIG_PAX_REFCOUNT
3043 + "tvs %%xcc, 6\n"
3044 +#endif
3045 +
3046 + : "=r" (new)
3047 + : "0" (c), "ir" (a)
3048 + : "cc");
3049 +
3050 + old = atomic64_cmpxchg(v, c, new);
3051 if (likely(old == c))
3052 break;
3053 c = old;
3054 }
3055 - return c != (u);
3056 + return c != u;
3057 }
3058
3059 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3060 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/cache.h linux-2.6.39.4/arch/sparc/include/asm/cache.h
3061 --- linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
3062 +++ linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
3063 @@ -10,7 +10,7 @@
3064 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3065
3066 #define L1_CACHE_SHIFT 5
3067 -#define L1_CACHE_BYTES 32
3068 +#define L1_CACHE_BYTES 32UL
3069
3070 #ifdef CONFIG_SPARC32
3071 #define SMP_CACHE_BYTES_SHIFT 5
3072 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_32.h linux-2.6.39.4/arch/sparc/include/asm/elf_32.h
3073 --- linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400
3074 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-08-05 19:44:33.000000000 -0400
3075 @@ -114,6 +114,13 @@ typedef struct {
3076
3077 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3078
3079 +#ifdef CONFIG_PAX_ASLR
3080 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3081 +
3082 +#define PAX_DELTA_MMAP_LEN 16
3083 +#define PAX_DELTA_STACK_LEN 16
3084 +#endif
3085 +
3086 /* This yields a mask that user programs can use to figure out what
3087 instruction set this cpu supports. This can NOT be done in userspace
3088 on Sparc. */
3089 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_64.h linux-2.6.39.4/arch/sparc/include/asm/elf_64.h
3090 --- linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400
3091 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-08-05 19:44:33.000000000 -0400
3092 @@ -162,6 +162,12 @@ typedef struct {
3093 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3094 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3095
3096 +#ifdef CONFIG_PAX_ASLR
3097 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3098 +
3099 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3100 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3101 +#endif
3102
3103 /* This yields a mask that user programs can use to figure out what
3104 instruction set this cpu supports. */
3105 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h
3106 --- linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
3107 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
3108 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3109 BTFIXUPDEF_INT(page_none)
3110 BTFIXUPDEF_INT(page_copy)
3111 BTFIXUPDEF_INT(page_readonly)
3112 +
3113 +#ifdef CONFIG_PAX_PAGEEXEC
3114 +BTFIXUPDEF_INT(page_shared_noexec)
3115 +BTFIXUPDEF_INT(page_copy_noexec)
3116 +BTFIXUPDEF_INT(page_readonly_noexec)
3117 +#endif
3118 +
3119 BTFIXUPDEF_INT(page_kernel)
3120
3121 #define PMD_SHIFT SUN4C_PMD_SHIFT
3122 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3123 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3124 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3125
3126 +#ifdef CONFIG_PAX_PAGEEXEC
3127 +extern pgprot_t PAGE_SHARED_NOEXEC;
3128 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3129 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3130 +#else
3131 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3132 +# define PAGE_COPY_NOEXEC PAGE_COPY
3133 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3134 +#endif
3135 +
3136 extern unsigned long page_kernel;
3137
3138 #ifdef MODULE
3139 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h
3140 --- linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400
3141 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-05 19:44:33.000000000 -0400
3142 @@ -115,6 +115,13 @@
3143 SRMMU_EXEC | SRMMU_REF)
3144 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3145 SRMMU_EXEC | SRMMU_REF)
3146 +
3147 +#ifdef CONFIG_PAX_PAGEEXEC
3148 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3149 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3150 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3151 +#endif
3152 +
3153 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3154 SRMMU_DIRTY | SRMMU_REF)
3155
3156 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h
3157 --- linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400
3158 +++ linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-08-05 19:44:33.000000000 -0400
3159 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3160
3161 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3162
3163 -static void inline arch_read_lock(arch_rwlock_t *lock)
3164 +static inline void arch_read_lock(arch_rwlock_t *lock)
3165 {
3166 unsigned long tmp1, tmp2;
3167
3168 __asm__ __volatile__ (
3169 "1: ldsw [%2], %0\n"
3170 " brlz,pn %0, 2f\n"
3171 -"4: add %0, 1, %1\n"
3172 +"4: addcc %0, 1, %1\n"
3173 +
3174 +#ifdef CONFIG_PAX_REFCOUNT
3175 +" tvs %%icc, 6\n"
3176 +#endif
3177 +
3178 " cas [%2], %0, %1\n"
3179 " cmp %0, %1\n"
3180 " bne,pn %%icc, 1b\n"
3181 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3182 " .previous"
3183 : "=&r" (tmp1), "=&r" (tmp2)
3184 : "r" (lock)
3185 - : "memory");
3186 + : "memory", "cc");
3187 }
3188
3189 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3190 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3191 {
3192 int tmp1, tmp2;
3193
3194 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3195 "1: ldsw [%2], %0\n"
3196 " brlz,a,pn %0, 2f\n"
3197 " mov 0, %0\n"
3198 -" add %0, 1, %1\n"
3199 +" addcc %0, 1, %1\n"
3200 +
3201 +#ifdef CONFIG_PAX_REFCOUNT
3202 +" tvs %%icc, 6\n"
3203 +#endif
3204 +
3205 " cas [%2], %0, %1\n"
3206 " cmp %0, %1\n"
3207 " bne,pn %%icc, 1b\n"
3208 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3209 return tmp1;
3210 }
3211
3212 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3213 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3214 {
3215 unsigned long tmp1, tmp2;
3216
3217 __asm__ __volatile__(
3218 "1: lduw [%2], %0\n"
3219 -" sub %0, 1, %1\n"
3220 +" subcc %0, 1, %1\n"
3221 +
3222 +#ifdef CONFIG_PAX_REFCOUNT
3223 +" tvs %%icc, 6\n"
3224 +#endif
3225 +
3226 " cas [%2], %0, %1\n"
3227 " cmp %0, %1\n"
3228 " bne,pn %%xcc, 1b\n"
3229 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3230 : "memory");
3231 }
3232
3233 -static void inline arch_write_lock(arch_rwlock_t *lock)
3234 +static inline void arch_write_lock(arch_rwlock_t *lock)
3235 {
3236 unsigned long mask, tmp1, tmp2;
3237
3238 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3239 : "memory");
3240 }
3241
3242 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3243 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3244 {
3245 __asm__ __volatile__(
3246 " stw %%g0, [%0]"
3247 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3248 : "memory");
3249 }
3250
3251 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3252 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3253 {
3254 unsigned long mask, tmp1, tmp2, result;
3255
3256 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h
3257 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400
3258 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-08-05 19:44:33.000000000 -0400
3259 @@ -50,6 +50,8 @@ struct thread_info {
3260 unsigned long w_saved;
3261
3262 struct restart_block restart_block;
3263 +
3264 + unsigned long lowest_stack;
3265 };
3266
3267 /*
3268 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h
3269 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400
3270 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-08-05 19:44:33.000000000 -0400
3271 @@ -63,6 +63,8 @@ struct thread_info {
3272 struct pt_regs *kern_una_regs;
3273 unsigned int kern_una_insn;
3274
3275 + unsigned long lowest_stack;
3276 +
3277 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3278 };
3279
3280 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h
3281 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
3282 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
3283 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3284
3285 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3286 {
3287 - if (n && __access_ok((unsigned long) to, n))
3288 + if ((long)n < 0)
3289 + return n;
3290 +
3291 + if (n && __access_ok((unsigned long) to, n)) {
3292 + if (!__builtin_constant_p(n))
3293 + check_object_size(from, n, true);
3294 return __copy_user(to, (__force void __user *) from, n);
3295 - else
3296 + } else
3297 return n;
3298 }
3299
3300 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3301 {
3302 + if ((long)n < 0)
3303 + return n;
3304 +
3305 + if (!__builtin_constant_p(n))
3306 + check_object_size(from, n, true);
3307 +
3308 return __copy_user(to, (__force void __user *) from, n);
3309 }
3310
3311 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3312 {
3313 - if (n && __access_ok((unsigned long) from, n))
3314 + if ((long)n < 0)
3315 + return n;
3316 +
3317 + if (n && __access_ok((unsigned long) from, n)) {
3318 + if (!__builtin_constant_p(n))
3319 + check_object_size(to, n, false);
3320 return __copy_user((__force void __user *) to, from, n);
3321 - else
3322 + } else
3323 return n;
3324 }
3325
3326 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3327 {
3328 + if ((long)n < 0)
3329 + return n;
3330 +
3331 return __copy_user((__force void __user *) to, from, n);
3332 }
3333
3334 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h
3335 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
3336 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
3337 @@ -10,6 +10,7 @@
3338 #include <linux/compiler.h>
3339 #include <linux/string.h>
3340 #include <linux/thread_info.h>
3341 +#include <linux/kernel.h>
3342 #include <asm/asi.h>
3343 #include <asm/system.h>
3344 #include <asm/spitfire.h>
3345 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3346 static inline unsigned long __must_check
3347 copy_from_user(void *to, const void __user *from, unsigned long size)
3348 {
3349 - unsigned long ret = ___copy_from_user(to, from, size);
3350 + unsigned long ret;
3351
3352 + if ((long)size < 0 || size > INT_MAX)
3353 + return size;
3354 +
3355 + if (!__builtin_constant_p(size))
3356 + check_object_size(to, size, false);
3357 +
3358 + ret = ___copy_from_user(to, from, size);
3359 if (unlikely(ret))
3360 ret = copy_from_user_fixup(to, from, size);
3361
3362 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3363 static inline unsigned long __must_check
3364 copy_to_user(void __user *to, const void *from, unsigned long size)
3365 {
3366 - unsigned long ret = ___copy_to_user(to, from, size);
3367 + unsigned long ret;
3368 +
3369 + if ((long)size < 0 || size > INT_MAX)
3370 + return size;
3371 +
3372 + if (!__builtin_constant_p(size))
3373 + check_object_size(from, size, true);
3374
3375 + ret = ___copy_to_user(to, from, size);
3376 if (unlikely(ret))
3377 ret = copy_to_user_fixup(to, from, size);
3378 return ret;
3379 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess.h linux-2.6.39.4/arch/sparc/include/asm/uaccess.h
3380 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
3381 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
3382 @@ -1,5 +1,13 @@
3383 #ifndef ___ASM_SPARC_UACCESS_H
3384 #define ___ASM_SPARC_UACCESS_H
3385 +
3386 +#ifdef __KERNEL__
3387 +#ifndef __ASSEMBLY__
3388 +#include <linux/types.h>
3389 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3390 +#endif
3391 +#endif
3392 +
3393 #if defined(__sparc__) && defined(__arch64__)
3394 #include <asm/uaccess_64.h>
3395 #else
3396 diff -urNp linux-2.6.39.4/arch/sparc/kernel/Makefile linux-2.6.39.4/arch/sparc/kernel/Makefile
3397 --- linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400
3398 +++ linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-08-05 19:44:33.000000000 -0400
3399 @@ -3,7 +3,7 @@
3400 #
3401
3402 asflags-y := -ansi
3403 -ccflags-y := -Werror
3404 +#ccflags-y := -Werror
3405
3406 extra-y := head_$(BITS).o
3407 extra-y += init_task.o
3408 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_32.c linux-2.6.39.4/arch/sparc/kernel/process_32.c
3409 --- linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400
3410 +++ linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-08-05 19:44:33.000000000 -0400
3411 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
3412 rw->ins[4], rw->ins[5],
3413 rw->ins[6],
3414 rw->ins[7]);
3415 - printk("%pS\n", (void *) rw->ins[7]);
3416 + printk("%pA\n", (void *) rw->ins[7]);
3417 rw = (struct reg_window32 *) rw->ins[6];
3418 }
3419 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3420 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
3421
3422 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3423 r->psr, r->pc, r->npc, r->y, print_tainted());
3424 - printk("PC: <%pS>\n", (void *) r->pc);
3425 + printk("PC: <%pA>\n", (void *) r->pc);
3426 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3427 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3428 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3429 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3430 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3431 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3432 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3433 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3434
3435 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3436 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3437 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
3438 rw = (struct reg_window32 *) fp;
3439 pc = rw->ins[7];
3440 printk("[%08lx : ", pc);
3441 - printk("%pS ] ", (void *) pc);
3442 + printk("%pA ] ", (void *) pc);
3443 fp = rw->ins[6];
3444 } while (++count < 16);
3445 printk("\n");
3446 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_64.c linux-2.6.39.4/arch/sparc/kernel/process_64.c
3447 --- linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400
3448 +++ linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-08-05 19:44:33.000000000 -0400
3449 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3450 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3451 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3452 if (regs->tstate & TSTATE_PRIV)
3453 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3454 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3455 }
3456
3457 void show_regs(struct pt_regs *regs)
3458 {
3459 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3460 regs->tpc, regs->tnpc, regs->y, print_tainted());
3461 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3462 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3463 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3464 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3465 regs->u_regs[3]);
3466 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3467 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3468 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3469 regs->u_regs[15]);
3470 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3471 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3472 show_regwindow(regs);
3473 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3474 }
3475 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3476 ((tp && tp->task) ? tp->task->pid : -1));
3477
3478 if (gp->tstate & TSTATE_PRIV) {
3479 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3480 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3481 (void *) gp->tpc,
3482 (void *) gp->o7,
3483 (void *) gp->i7,
3484 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c
3485 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400
3486 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-05 19:44:33.000000000 -0400
3487 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3488 if (ARCH_SUN4C && len > 0x20000000)
3489 return -ENOMEM;
3490 if (!addr)
3491 - addr = TASK_UNMAPPED_BASE;
3492 + addr = current->mm->mmap_base;
3493
3494 if (flags & MAP_SHARED)
3495 addr = COLOUR_ALIGN(addr);
3496 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3497 }
3498 if (TASK_SIZE - PAGE_SIZE - len < addr)
3499 return -ENOMEM;
3500 - if (!vmm || addr + len <= vmm->vm_start)
3501 + if (check_heap_stack_gap(vmm, addr, len))
3502 return addr;
3503 addr = vmm->vm_end;
3504 if (flags & MAP_SHARED)
3505 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c
3506 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400
3507 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-05 19:44:33.000000000 -0400
3508 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3509 /* We do not accept a shared mapping if it would violate
3510 * cache aliasing constraints.
3511 */
3512 - if ((flags & MAP_SHARED) &&
3513 + if ((filp || (flags & MAP_SHARED)) &&
3514 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3515 return -EINVAL;
3516 return addr;
3517 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3518 if (filp || (flags & MAP_SHARED))
3519 do_color_align = 1;
3520
3521 +#ifdef CONFIG_PAX_RANDMMAP
3522 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3523 +#endif
3524 +
3525 if (addr) {
3526 if (do_color_align)
3527 addr = COLOUR_ALIGN(addr, pgoff);
3528 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3529 addr = PAGE_ALIGN(addr);
3530
3531 vma = find_vma(mm, addr);
3532 - if (task_size - len >= addr &&
3533 - (!vma || addr + len <= vma->vm_start))
3534 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3535 return addr;
3536 }
3537
3538 if (len > mm->cached_hole_size) {
3539 - start_addr = addr = mm->free_area_cache;
3540 + start_addr = addr = mm->free_area_cache;
3541 } else {
3542 - start_addr = addr = TASK_UNMAPPED_BASE;
3543 + start_addr = addr = mm->mmap_base;
3544 mm->cached_hole_size = 0;
3545 }
3546
3547 @@ -174,14 +177,14 @@ full_search:
3548 vma = find_vma(mm, VA_EXCLUDE_END);
3549 }
3550 if (unlikely(task_size < addr)) {
3551 - if (start_addr != TASK_UNMAPPED_BASE) {
3552 - start_addr = addr = TASK_UNMAPPED_BASE;
3553 + if (start_addr != mm->mmap_base) {
3554 + start_addr = addr = mm->mmap_base;
3555 mm->cached_hole_size = 0;
3556 goto full_search;
3557 }
3558 return -ENOMEM;
3559 }
3560 - if (likely(!vma || addr + len <= vma->vm_start)) {
3561 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3562 /*
3563 * Remember the place where we stopped the search:
3564 */
3565 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3566 /* We do not accept a shared mapping if it would violate
3567 * cache aliasing constraints.
3568 */
3569 - if ((flags & MAP_SHARED) &&
3570 + if ((filp || (flags & MAP_SHARED)) &&
3571 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3572 return -EINVAL;
3573 return addr;
3574 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3575 addr = PAGE_ALIGN(addr);
3576
3577 vma = find_vma(mm, addr);
3578 - if (task_size - len >= addr &&
3579 - (!vma || addr + len <= vma->vm_start))
3580 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3581 return addr;
3582 }
3583
3584 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3585 /* make sure it can fit in the remaining address space */
3586 if (likely(addr > len)) {
3587 vma = find_vma(mm, addr-len);
3588 - if (!vma || addr <= vma->vm_start) {
3589 + if (check_heap_stack_gap(vma, addr - len, len)) {
3590 /* remember the address as a hint for next time */
3591 return (mm->free_area_cache = addr-len);
3592 }
3593 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3594 if (unlikely(mm->mmap_base < len))
3595 goto bottomup;
3596
3597 - addr = mm->mmap_base-len;
3598 - if (do_color_align)
3599 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3600 + addr = mm->mmap_base - len;
3601
3602 do {
3603 + if (do_color_align)
3604 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3605 /*
3606 * Lookup failure means no vma is above this address,
3607 * else if new region fits below vma->vm_start,
3608 * return with success:
3609 */
3610 vma = find_vma(mm, addr);
3611 - if (likely(!vma || addr+len <= vma->vm_start)) {
3612 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3613 /* remember the address as a hint for next time */
3614 return (mm->free_area_cache = addr);
3615 }
3616 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3617 mm->cached_hole_size = vma->vm_start - addr;
3618
3619 /* try just below the current vma->vm_start */
3620 - addr = vma->vm_start-len;
3621 - if (do_color_align)
3622 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3623 - } while (likely(len < vma->vm_start));
3624 + addr = skip_heap_stack_gap(vma, len);
3625 + } while (!IS_ERR_VALUE(addr));
3626
3627 bottomup:
3628 /*
3629 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3630 gap == RLIM_INFINITY ||
3631 sysctl_legacy_va_layout) {
3632 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3633 +
3634 +#ifdef CONFIG_PAX_RANDMMAP
3635 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3636 + mm->mmap_base += mm->delta_mmap;
3637 +#endif
3638 +
3639 mm->get_unmapped_area = arch_get_unmapped_area;
3640 mm->unmap_area = arch_unmap_area;
3641 } else {
3642 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3643 gap = (task_size / 6 * 5);
3644
3645 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3646 +
3647 +#ifdef CONFIG_PAX_RANDMMAP
3648 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3649 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3650 +#endif
3651 +
3652 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3653 mm->unmap_area = arch_unmap_area_topdown;
3654 }
3655 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_32.c linux-2.6.39.4/arch/sparc/kernel/traps_32.c
3656 --- linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400
3657 +++ linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-08-05 19:44:33.000000000 -0400
3658 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3659 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3660 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3661
3662 +extern void gr_handle_kernel_exploit(void);
3663 +
3664 void die_if_kernel(char *str, struct pt_regs *regs)
3665 {
3666 static int die_counter;
3667 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3668 count++ < 30 &&
3669 (((unsigned long) rw) >= PAGE_OFFSET) &&
3670 !(((unsigned long) rw) & 0x7)) {
3671 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3672 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3673 (void *) rw->ins[7]);
3674 rw = (struct reg_window32 *)rw->ins[6];
3675 }
3676 }
3677 printk("Instruction DUMP:");
3678 instruction_dump ((unsigned long *) regs->pc);
3679 - if(regs->psr & PSR_PS)
3680 + if(regs->psr & PSR_PS) {
3681 + gr_handle_kernel_exploit();
3682 do_exit(SIGKILL);
3683 + }
3684 do_exit(SIGSEGV);
3685 }
3686
3687 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_64.c linux-2.6.39.4/arch/sparc/kernel/traps_64.c
3688 --- linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400
3689 +++ linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-08-05 19:44:33.000000000 -0400
3690 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3691 i + 1,
3692 p->trapstack[i].tstate, p->trapstack[i].tpc,
3693 p->trapstack[i].tnpc, p->trapstack[i].tt);
3694 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3695 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3696 }
3697 }
3698
3699 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3700
3701 lvl -= 0x100;
3702 if (regs->tstate & TSTATE_PRIV) {
3703 +
3704 +#ifdef CONFIG_PAX_REFCOUNT
3705 + if (lvl == 6)
3706 + pax_report_refcount_overflow(regs);
3707 +#endif
3708 +
3709 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3710 die_if_kernel(buffer, regs);
3711 }
3712 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3713 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3714 {
3715 char buffer[32];
3716 -
3717 +
3718 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3719 0, lvl, SIGTRAP) == NOTIFY_STOP)
3720 return;
3721
3722 +#ifdef CONFIG_PAX_REFCOUNT
3723 + if (lvl == 6)
3724 + pax_report_refcount_overflow(regs);
3725 +#endif
3726 +
3727 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3728
3729 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3730 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3731 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3732 printk("%s" "ERROR(%d): ",
3733 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3734 - printk("TPC<%pS>\n", (void *) regs->tpc);
3735 + printk("TPC<%pA>\n", (void *) regs->tpc);
3736 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3737 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3738 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3739 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3740 smp_processor_id(),
3741 (type & 0x1) ? 'I' : 'D',
3742 regs->tpc);
3743 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3744 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3745 panic("Irrecoverable Cheetah+ parity error.");
3746 }
3747
3748 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3749 smp_processor_id(),
3750 (type & 0x1) ? 'I' : 'D',
3751 regs->tpc);
3752 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3753 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3754 }
3755
3756 struct sun4v_error_entry {
3757 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3758
3759 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3760 regs->tpc, tl);
3761 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3762 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3763 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3764 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3765 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3766 (void *) regs->u_regs[UREG_I7]);
3767 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3768 "pte[%lx] error[%lx]\n",
3769 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3770
3771 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3772 regs->tpc, tl);
3773 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3774 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3775 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3776 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3777 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3778 (void *) regs->u_regs[UREG_I7]);
3779 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3780 "pte[%lx] error[%lx]\n",
3781 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3782 fp = (unsigned long)sf->fp + STACK_BIAS;
3783 }
3784
3785 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3786 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3787 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3788 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3789 int index = tsk->curr_ret_stack;
3790 if (tsk->ret_stack && index >= graph) {
3791 pc = tsk->ret_stack[index - graph].ret;
3792 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3793 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3794 graph++;
3795 }
3796 }
3797 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3798 return (struct reg_window *) (fp + STACK_BIAS);
3799 }
3800
3801 +extern void gr_handle_kernel_exploit(void);
3802 +
3803 void die_if_kernel(char *str, struct pt_regs *regs)
3804 {
3805 static int die_counter;
3806 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3807 while (rw &&
3808 count++ < 30 &&
3809 kstack_valid(tp, (unsigned long) rw)) {
3810 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3811 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3812 (void *) rw->ins[7]);
3813
3814 rw = kernel_stack_up(rw);
3815 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3816 }
3817 user_instruction_dump ((unsigned int __user *) regs->tpc);
3818 }
3819 - if (regs->tstate & TSTATE_PRIV)
3820 + if (regs->tstate & TSTATE_PRIV) {
3821 + gr_handle_kernel_exploit();
3822 do_exit(SIGKILL);
3823 + }
3824 do_exit(SIGSEGV);
3825 }
3826 EXPORT_SYMBOL(die_if_kernel);
3827 diff -urNp linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c
3828 --- linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400
3829 +++ linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-08-05 19:44:33.000000000 -0400
3830 @@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
3831 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3832
3833 if (__ratelimit(&ratelimit)) {
3834 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3835 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3836 regs->tpc, (void *) regs->tpc);
3837 }
3838 }
3839 diff -urNp linux-2.6.39.4/arch/sparc/lib/atomic_64.S linux-2.6.39.4/arch/sparc/lib/atomic_64.S
3840 --- linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400
3841 +++ linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-08-05 19:44:33.000000000 -0400
3842 @@ -18,7 +18,12 @@
3843 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3844 BACKOFF_SETUP(%o2)
3845 1: lduw [%o1], %g1
3846 - add %g1, %o0, %g7
3847 + addcc %g1, %o0, %g7
3848 +
3849 +#ifdef CONFIG_PAX_REFCOUNT
3850 + tvs %icc, 6
3851 +#endif
3852 +
3853 cas [%o1], %g1, %g7
3854 cmp %g1, %g7
3855 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3856 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3857 2: BACKOFF_SPIN(%o2, %o3, 1b)
3858 .size atomic_add, .-atomic_add
3859
3860 + .globl atomic_add_unchecked
3861 + .type atomic_add_unchecked,#function
3862 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3863 + BACKOFF_SETUP(%o2)
3864 +1: lduw [%o1], %g1
3865 + add %g1, %o0, %g7
3866 + cas [%o1], %g1, %g7
3867 + cmp %g1, %g7
3868 + bne,pn %icc, 2f
3869 + nop
3870 + retl
3871 + nop
3872 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3873 + .size atomic_add_unchecked, .-atomic_add_unchecked
3874 +
3875 .globl atomic_sub
3876 .type atomic_sub,#function
3877 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3878 BACKOFF_SETUP(%o2)
3879 1: lduw [%o1], %g1
3880 - sub %g1, %o0, %g7
3881 + subcc %g1, %o0, %g7
3882 +
3883 +#ifdef CONFIG_PAX_REFCOUNT
3884 + tvs %icc, 6
3885 +#endif
3886 +
3887 cas [%o1], %g1, %g7
3888 cmp %g1, %g7
3889 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3890 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3891 2: BACKOFF_SPIN(%o2, %o3, 1b)
3892 .size atomic_sub, .-atomic_sub
3893
3894 + .globl atomic_sub_unchecked
3895 + .type atomic_sub_unchecked,#function
3896 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3897 + BACKOFF_SETUP(%o2)
3898 +1: lduw [%o1], %g1
3899 + sub %g1, %o0, %g7
3900 + cas [%o1], %g1, %g7
3901 + cmp %g1, %g7
3902 + bne,pn %icc, 2f
3903 + nop
3904 + retl
3905 + nop
3906 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3907 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3908 +
3909 .globl atomic_add_ret
3910 .type atomic_add_ret,#function
3911 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3912 BACKOFF_SETUP(%o2)
3913 1: lduw [%o1], %g1
3914 - add %g1, %o0, %g7
3915 + addcc %g1, %o0, %g7
3916 +
3917 +#ifdef CONFIG_PAX_REFCOUNT
3918 + tvs %icc, 6
3919 +#endif
3920 +
3921 cas [%o1], %g1, %g7
3922 cmp %g1, %g7
3923 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3924 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3925 2: BACKOFF_SPIN(%o2, %o3, 1b)
3926 .size atomic_add_ret, .-atomic_add_ret
3927
3928 + .globl atomic_add_ret_unchecked
3929 + .type atomic_add_ret_unchecked,#function
3930 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3931 + BACKOFF_SETUP(%o2)
3932 +1: lduw [%o1], %g1
3933 + addcc %g1, %o0, %g7
3934 + cas [%o1], %g1, %g7
3935 + cmp %g1, %g7
3936 + bne,pn %icc, 2f
3937 + add %g7, %o0, %g7
3938 + sra %g7, 0, %o0
3939 + retl
3940 + nop
3941 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3942 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3943 +
3944 .globl atomic_sub_ret
3945 .type atomic_sub_ret,#function
3946 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3947 BACKOFF_SETUP(%o2)
3948 1: lduw [%o1], %g1
3949 - sub %g1, %o0, %g7
3950 + subcc %g1, %o0, %g7
3951 +
3952 +#ifdef CONFIG_PAX_REFCOUNT
3953 + tvs %icc, 6
3954 +#endif
3955 +
3956 cas [%o1], %g1, %g7
3957 cmp %g1, %g7
3958 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3959 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3960 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3961 BACKOFF_SETUP(%o2)
3962 1: ldx [%o1], %g1
3963 - add %g1, %o0, %g7
3964 + addcc %g1, %o0, %g7
3965 +
3966 +#ifdef CONFIG_PAX_REFCOUNT
3967 + tvs %xcc, 6
3968 +#endif
3969 +
3970 casx [%o1], %g1, %g7
3971 cmp %g1, %g7
3972 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3973 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3974 2: BACKOFF_SPIN(%o2, %o3, 1b)
3975 .size atomic64_add, .-atomic64_add
3976
3977 + .globl atomic64_add_unchecked
3978 + .type atomic64_add_unchecked,#function
3979 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3980 + BACKOFF_SETUP(%o2)
3981 +1: ldx [%o1], %g1
3982 + addcc %g1, %o0, %g7
3983 + casx [%o1], %g1, %g7
3984 + cmp %g1, %g7
3985 + bne,pn %xcc, 2f
3986 + nop
3987 + retl
3988 + nop
3989 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3990 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3991 +
3992 .globl atomic64_sub
3993 .type atomic64_sub,#function
3994 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3995 BACKOFF_SETUP(%o2)
3996 1: ldx [%o1], %g1
3997 - sub %g1, %o0, %g7
3998 + subcc %g1, %o0, %g7
3999 +
4000 +#ifdef CONFIG_PAX_REFCOUNT
4001 + tvs %xcc, 6
4002 +#endif
4003 +
4004 casx [%o1], %g1, %g7
4005 cmp %g1, %g7
4006 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4007 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4008 2: BACKOFF_SPIN(%o2, %o3, 1b)
4009 .size atomic64_sub, .-atomic64_sub
4010
4011 + .globl atomic64_sub_unchecked
4012 + .type atomic64_sub_unchecked,#function
4013 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4014 + BACKOFF_SETUP(%o2)
4015 +1: ldx [%o1], %g1
4016 + subcc %g1, %o0, %g7
4017 + casx [%o1], %g1, %g7
4018 + cmp %g1, %g7
4019 + bne,pn %xcc, 2f
4020 + nop
4021 + retl
4022 + nop
4023 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4024 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4025 +
4026 .globl atomic64_add_ret
4027 .type atomic64_add_ret,#function
4028 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4029 BACKOFF_SETUP(%o2)
4030 1: ldx [%o1], %g1
4031 - add %g1, %o0, %g7
4032 + addcc %g1, %o0, %g7
4033 +
4034 +#ifdef CONFIG_PAX_REFCOUNT
4035 + tvs %xcc, 6
4036 +#endif
4037 +
4038 casx [%o1], %g1, %g7
4039 cmp %g1, %g7
4040 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4041 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4042 2: BACKOFF_SPIN(%o2, %o3, 1b)
4043 .size atomic64_add_ret, .-atomic64_add_ret
4044
4045 + .globl atomic64_add_ret_unchecked
4046 + .type atomic64_add_ret_unchecked,#function
4047 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4048 + BACKOFF_SETUP(%o2)
4049 +1: ldx [%o1], %g1
4050 + addcc %g1, %o0, %g7
4051 + casx [%o1], %g1, %g7
4052 + cmp %g1, %g7
4053 + bne,pn %xcc, 2f
4054 + add %g7, %o0, %g7
4055 + mov %g7, %o0
4056 + retl
4057 + nop
4058 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4059 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4060 +
4061 .globl atomic64_sub_ret
4062 .type atomic64_sub_ret,#function
4063 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4064 BACKOFF_SETUP(%o2)
4065 1: ldx [%o1], %g1
4066 - sub %g1, %o0, %g7
4067 + subcc %g1, %o0, %g7
4068 +
4069 +#ifdef CONFIG_PAX_REFCOUNT
4070 + tvs %xcc, 6
4071 +#endif
4072 +
4073 casx [%o1], %g1, %g7
4074 cmp %g1, %g7
4075 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4076 diff -urNp linux-2.6.39.4/arch/sparc/lib/ksyms.c linux-2.6.39.4/arch/sparc/lib/ksyms.c
4077 --- linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400
4078 +++ linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-08-05 19:44:33.000000000 -0400
4079 @@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
4080
4081 /* Atomic counter implementation. */
4082 EXPORT_SYMBOL(atomic_add);
4083 +EXPORT_SYMBOL(atomic_add_unchecked);
4084 EXPORT_SYMBOL(atomic_add_ret);
4085 EXPORT_SYMBOL(atomic_sub);
4086 +EXPORT_SYMBOL(atomic_sub_unchecked);
4087 EXPORT_SYMBOL(atomic_sub_ret);
4088 EXPORT_SYMBOL(atomic64_add);
4089 +EXPORT_SYMBOL(atomic64_add_unchecked);
4090 EXPORT_SYMBOL(atomic64_add_ret);
4091 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4092 EXPORT_SYMBOL(atomic64_sub);
4093 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4094 EXPORT_SYMBOL(atomic64_sub_ret);
4095
4096 /* Atomic bit operations. */
4097 diff -urNp linux-2.6.39.4/arch/sparc/lib/Makefile linux-2.6.39.4/arch/sparc/lib/Makefile
4098 --- linux-2.6.39.4/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400
4099 +++ linux-2.6.39.4/arch/sparc/lib/Makefile 2011-08-05 19:44:33.000000000 -0400
4100 @@ -2,7 +2,7 @@
4101 #
4102
4103 asflags-y := -ansi -DST_DIV0=0x02
4104 -ccflags-y := -Werror
4105 +#ccflags-y := -Werror
4106
4107 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4108 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4109 diff -urNp linux-2.6.39.4/arch/sparc/Makefile linux-2.6.39.4/arch/sparc/Makefile
4110 --- linux-2.6.39.4/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400
4111 +++ linux-2.6.39.4/arch/sparc/Makefile 2011-08-05 19:44:33.000000000 -0400
4112 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4113 # Export what is needed by arch/sparc/boot/Makefile
4114 export VMLINUX_INIT VMLINUX_MAIN
4115 VMLINUX_INIT := $(head-y) $(init-y)
4116 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4117 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4118 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4119 VMLINUX_MAIN += $(drivers-y) $(net-y)
4120
4121 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_32.c linux-2.6.39.4/arch/sparc/mm/fault_32.c
4122 --- linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400
4123 +++ linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-08-05 19:44:33.000000000 -0400
4124 @@ -22,6 +22,9 @@
4125 #include <linux/interrupt.h>
4126 #include <linux/module.h>
4127 #include <linux/kdebug.h>
4128 +#include <linux/slab.h>
4129 +#include <linux/pagemap.h>
4130 +#include <linux/compiler.h>
4131
4132 #include <asm/system.h>
4133 #include <asm/page.h>
4134 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4135 return safe_compute_effective_address(regs, insn);
4136 }
4137
4138 +#ifdef CONFIG_PAX_PAGEEXEC
4139 +#ifdef CONFIG_PAX_DLRESOLVE
4140 +static void pax_emuplt_close(struct vm_area_struct *vma)
4141 +{
4142 + vma->vm_mm->call_dl_resolve = 0UL;
4143 +}
4144 +
4145 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4146 +{
4147 + unsigned int *kaddr;
4148 +
4149 + vmf->page = alloc_page(GFP_HIGHUSER);
4150 + if (!vmf->page)
4151 + return VM_FAULT_OOM;
4152 +
4153 + kaddr = kmap(vmf->page);
4154 + memset(kaddr, 0, PAGE_SIZE);
4155 + kaddr[0] = 0x9DE3BFA8U; /* save */
4156 + flush_dcache_page(vmf->page);
4157 + kunmap(vmf->page);
4158 + return VM_FAULT_MAJOR;
4159 +}
4160 +
4161 +static const struct vm_operations_struct pax_vm_ops = {
4162 + .close = pax_emuplt_close,
4163 + .fault = pax_emuplt_fault
4164 +};
4165 +
4166 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4167 +{
4168 + int ret;
4169 +
4170 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4171 + vma->vm_mm = current->mm;
4172 + vma->vm_start = addr;
4173 + vma->vm_end = addr + PAGE_SIZE;
4174 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4175 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4176 + vma->vm_ops = &pax_vm_ops;
4177 +
4178 + ret = insert_vm_struct(current->mm, vma);
4179 + if (ret)
4180 + return ret;
4181 +
4182 + ++current->mm->total_vm;
4183 + return 0;
4184 +}
4185 +#endif
4186 +
4187 +/*
4188 + * PaX: decide what to do with offenders (regs->pc = fault address)
4189 + *
4190 + * returns 1 when task should be killed
4191 + * 2 when patched PLT trampoline was detected
4192 + * 3 when unpatched PLT trampoline was detected
4193 + */
4194 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4195 +{
4196 +
4197 +#ifdef CONFIG_PAX_EMUPLT
4198 + int err;
4199 +
4200 + do { /* PaX: patched PLT emulation #1 */
4201 + unsigned int sethi1, sethi2, jmpl;
4202 +
4203 + err = get_user(sethi1, (unsigned int *)regs->pc);
4204 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4205 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4206 +
4207 + if (err)
4208 + break;
4209 +
4210 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4211 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4212 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4213 + {
4214 + unsigned int addr;
4215 +
4216 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4217 + addr = regs->u_regs[UREG_G1];
4218 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4219 + regs->pc = addr;
4220 + regs->npc = addr+4;
4221 + return 2;
4222 + }
4223 + } while (0);
4224 +
4225 + { /* PaX: patched PLT emulation #2 */
4226 + unsigned int ba;
4227 +
4228 + err = get_user(ba, (unsigned int *)regs->pc);
4229 +
4230 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4231 + unsigned int addr;
4232 +
4233 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4234 + regs->pc = addr;
4235 + regs->npc = addr+4;
4236 + return 2;
4237 + }
4238 + }
4239 +
4240 + do { /* PaX: patched PLT emulation #3 */
4241 + unsigned int sethi, jmpl, nop;
4242 +
4243 + err = get_user(sethi, (unsigned int *)regs->pc);
4244 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4245 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4246 +
4247 + if (err)
4248 + break;
4249 +
4250 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4251 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4252 + nop == 0x01000000U)
4253 + {
4254 + unsigned int addr;
4255 +
4256 + addr = (sethi & 0x003FFFFFU) << 10;
4257 + regs->u_regs[UREG_G1] = addr;
4258 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4259 + regs->pc = addr;
4260 + regs->npc = addr+4;
4261 + return 2;
4262 + }
4263 + } while (0);
4264 +
4265 + do { /* PaX: unpatched PLT emulation step 1 */
4266 + unsigned int sethi, ba, nop;
4267 +
4268 + err = get_user(sethi, (unsigned int *)regs->pc);
4269 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4270 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4271 +
4272 + if (err)
4273 + break;
4274 +
4275 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4276 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4277 + nop == 0x01000000U)
4278 + {
4279 + unsigned int addr, save, call;
4280 +
4281 + if ((ba & 0xFFC00000U) == 0x30800000U)
4282 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4283 + else
4284 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4285 +
4286 + err = get_user(save, (unsigned int *)addr);
4287 + err |= get_user(call, (unsigned int *)(addr+4));
4288 + err |= get_user(nop, (unsigned int *)(addr+8));
4289 + if (err)
4290 + break;
4291 +
4292 +#ifdef CONFIG_PAX_DLRESOLVE
4293 + if (save == 0x9DE3BFA8U &&
4294 + (call & 0xC0000000U) == 0x40000000U &&
4295 + nop == 0x01000000U)
4296 + {
4297 + struct vm_area_struct *vma;
4298 + unsigned long call_dl_resolve;
4299 +
4300 + down_read(&current->mm->mmap_sem);
4301 + call_dl_resolve = current->mm->call_dl_resolve;
4302 + up_read(&current->mm->mmap_sem);
4303 + if (likely(call_dl_resolve))
4304 + goto emulate;
4305 +
4306 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4307 +
4308 + down_write(&current->mm->mmap_sem);
4309 + if (current->mm->call_dl_resolve) {
4310 + call_dl_resolve = current->mm->call_dl_resolve;
4311 + up_write(&current->mm->mmap_sem);
4312 + if (vma)
4313 + kmem_cache_free(vm_area_cachep, vma);
4314 + goto emulate;
4315 + }
4316 +
4317 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4318 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4319 + up_write(&current->mm->mmap_sem);
4320 + if (vma)
4321 + kmem_cache_free(vm_area_cachep, vma);
4322 + return 1;
4323 + }
4324 +
4325 + if (pax_insert_vma(vma, call_dl_resolve)) {
4326 + up_write(&current->mm->mmap_sem);
4327 + kmem_cache_free(vm_area_cachep, vma);
4328 + return 1;
4329 + }
4330 +
4331 + current->mm->call_dl_resolve = call_dl_resolve;
4332 + up_write(&current->mm->mmap_sem);
4333 +
4334 +emulate:
4335 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4336 + regs->pc = call_dl_resolve;
4337 + regs->npc = addr+4;
4338 + return 3;
4339 + }
4340 +#endif
4341 +
4342 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4343 + if ((save & 0xFFC00000U) == 0x05000000U &&
4344 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4345 + nop == 0x01000000U)
4346 + {
4347 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4348 + regs->u_regs[UREG_G2] = addr + 4;
4349 + addr = (save & 0x003FFFFFU) << 10;
4350 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4351 + regs->pc = addr;
4352 + regs->npc = addr+4;
4353 + return 3;
4354 + }
4355 + }
4356 + } while (0);
4357 +
4358 + do { /* PaX: unpatched PLT emulation step 2 */
4359 + unsigned int save, call, nop;
4360 +
4361 + err = get_user(save, (unsigned int *)(regs->pc-4));
4362 + err |= get_user(call, (unsigned int *)regs->pc);
4363 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4364 + if (err)
4365 + break;
4366 +
4367 + if (save == 0x9DE3BFA8U &&
4368 + (call & 0xC0000000U) == 0x40000000U &&
4369 + nop == 0x01000000U)
4370 + {
4371 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4372 +
4373 + regs->u_regs[UREG_RETPC] = regs->pc;
4374 + regs->pc = dl_resolve;
4375 + regs->npc = dl_resolve+4;
4376 + return 3;
4377 + }
4378 + } while (0);
4379 +#endif
4380 +
4381 + return 1;
4382 +}
4383 +
4384 +void pax_report_insns(void *pc, void *sp)
4385 +{
4386 + unsigned long i;
4387 +
4388 + printk(KERN_ERR "PAX: bytes at PC: ");
4389 + for (i = 0; i < 8; i++) {
4390 + unsigned int c;
4391 + if (get_user(c, (unsigned int *)pc+i))
4392 + printk(KERN_CONT "???????? ");
4393 + else
4394 + printk(KERN_CONT "%08x ", c);
4395 + }
4396 + printk("\n");
4397 +}
4398 +#endif
4399 +
4400 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4401 int text_fault)
4402 {
4403 @@ -281,6 +546,24 @@ good_area:
4404 if(!(vma->vm_flags & VM_WRITE))
4405 goto bad_area;
4406 } else {
4407 +
4408 +#ifdef CONFIG_PAX_PAGEEXEC
4409 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4410 + up_read(&mm->mmap_sem);
4411 + switch (pax_handle_fetch_fault(regs)) {
4412 +
4413 +#ifdef CONFIG_PAX_EMUPLT
4414 + case 2:
4415 + case 3:
4416 + return;
4417 +#endif
4418 +
4419 + }
4420 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4421 + do_group_exit(SIGKILL);
4422 + }
4423 +#endif
4424 +
4425 /* Allow reads even for write-only mappings */
4426 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4427 goto bad_area;
4428 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_64.c linux-2.6.39.4/arch/sparc/mm/fault_64.c
4429 --- linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400
4430 +++ linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-08-05 19:44:33.000000000 -0400
4431 @@ -21,6 +21,9 @@
4432 #include <linux/kprobes.h>
4433 #include <linux/kdebug.h>
4434 #include <linux/percpu.h>
4435 +#include <linux/slab.h>
4436 +#include <linux/pagemap.h>
4437 +#include <linux/compiler.h>
4438
4439 #include <asm/page.h>
4440 #include <asm/pgtable.h>
4441 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4442 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4443 regs->tpc);
4444 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4445 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4446 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4447 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4448 dump_stack();
4449 unhandled_fault(regs->tpc, current, regs);
4450 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4451 show_regs(regs);
4452 }
4453
4454 +#ifdef CONFIG_PAX_PAGEEXEC
4455 +#ifdef CONFIG_PAX_DLRESOLVE
4456 +static void pax_emuplt_close(struct vm_area_struct *vma)
4457 +{
4458 + vma->vm_mm->call_dl_resolve = 0UL;
4459 +}
4460 +
4461 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4462 +{
4463 + unsigned int *kaddr;
4464 +
4465 + vmf->page = alloc_page(GFP_HIGHUSER);
4466 + if (!vmf->page)
4467 + return VM_FAULT_OOM;
4468 +
4469 + kaddr = kmap(vmf->page);
4470 + memset(kaddr, 0, PAGE_SIZE);
4471 + kaddr[0] = 0x9DE3BFA8U; /* save */
4472 + flush_dcache_page(vmf->page);
4473 + kunmap(vmf->page);
4474 + return VM_FAULT_MAJOR;
4475 +}
4476 +
4477 +static const struct vm_operations_struct pax_vm_ops = {
4478 + .close = pax_emuplt_close,
4479 + .fault = pax_emuplt_fault
4480 +};
4481 +
4482 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4483 +{
4484 + int ret;
4485 +
4486 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4487 + vma->vm_mm = current->mm;
4488 + vma->vm_start = addr;
4489 + vma->vm_end = addr + PAGE_SIZE;
4490 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4491 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4492 + vma->vm_ops = &pax_vm_ops;
4493 +
4494 + ret = insert_vm_struct(current->mm, vma);
4495 + if (ret)
4496 + return ret;
4497 +
4498 + ++current->mm->total_vm;
4499 + return 0;
4500 +}
4501 +#endif
4502 +
4503 +/*
4504 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4505 + *
4506 + * returns 1 when task should be killed
4507 + * 2 when patched PLT trampoline was detected
4508 + * 3 when unpatched PLT trampoline was detected
4509 + */
4510 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4511 +{
4512 +
4513 +#ifdef CONFIG_PAX_EMUPLT
4514 + int err;
4515 +
4516 + do { /* PaX: patched PLT emulation #1 */
4517 + unsigned int sethi1, sethi2, jmpl;
4518 +
4519 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4520 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4521 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4522 +
4523 + if (err)
4524 + break;
4525 +
4526 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4527 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4528 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4529 + {
4530 + unsigned long addr;
4531 +
4532 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4533 + addr = regs->u_regs[UREG_G1];
4534 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4535 +
4536 + if (test_thread_flag(TIF_32BIT))
4537 + addr &= 0xFFFFFFFFUL;
4538 +
4539 + regs->tpc = addr;
4540 + regs->tnpc = addr+4;
4541 + return 2;
4542 + }
4543 + } while (0);
4544 +
4545 + { /* PaX: patched PLT emulation #2 */
4546 + unsigned int ba;
4547 +
4548 + err = get_user(ba, (unsigned int *)regs->tpc);
4549 +
4550 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4551 + unsigned long addr;
4552 +
4553 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4554 +
4555 + if (test_thread_flag(TIF_32BIT))
4556 + addr &= 0xFFFFFFFFUL;
4557 +
4558 + regs->tpc = addr;
4559 + regs->tnpc = addr+4;
4560 + return 2;
4561 + }
4562 + }
4563 +
4564 + do { /* PaX: patched PLT emulation #3 */
4565 + unsigned int sethi, jmpl, nop;
4566 +
4567 + err = get_user(sethi, (unsigned int *)regs->tpc);
4568 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4569 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4576 + nop == 0x01000000U)
4577 + {
4578 + unsigned long addr;
4579 +
4580 + addr = (sethi & 0x003FFFFFU) << 10;
4581 + regs->u_regs[UREG_G1] = addr;
4582 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #4 */
4594 + unsigned int sethi, mov1, call, mov2;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4600 +
4601 + if (err)
4602 + break;
4603 +
4604 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4605 + mov1 == 0x8210000FU &&
4606 + (call & 0xC0000000U) == 0x40000000U &&
4607 + mov2 == 0x9E100001U)
4608 + {
4609 + unsigned long addr;
4610 +
4611 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4612 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4613 +
4614 + if (test_thread_flag(TIF_32BIT))
4615 + addr &= 0xFFFFFFFFUL;
4616 +
4617 + regs->tpc = addr;
4618 + regs->tnpc = addr+4;
4619 + return 2;
4620 + }
4621 + } while (0);
4622 +
4623 + do { /* PaX: patched PLT emulation #5 */
4624 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4625 +
4626 + err = get_user(sethi, (unsigned int *)regs->tpc);
4627 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4628 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4629 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4630 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4631 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4632 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4633 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4634 +
4635 + if (err)
4636 + break;
4637 +
4638 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4639 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4640 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4641 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4642 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4643 + sllx == 0x83287020U &&
4644 + jmpl == 0x81C04005U &&
4645 + nop == 0x01000000U)
4646 + {
4647 + unsigned long addr;
4648 +
4649 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4650 + regs->u_regs[UREG_G1] <<= 32;
4651 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4652 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4653 + regs->tpc = addr;
4654 + regs->tnpc = addr+4;
4655 + return 2;
4656 + }
4657 + } while (0);
4658 +
4659 + do { /* PaX: patched PLT emulation #6 */
4660 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4661 +
4662 + err = get_user(sethi, (unsigned int *)regs->tpc);
4663 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4664 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4665 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4666 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4667 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4675 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4676 + sllx == 0x83287020U &&
4677 + (or & 0xFFFFE000U) == 0x8A116000U &&
4678 + jmpl == 0x81C04005U &&
4679 + nop == 0x01000000U)
4680 + {
4681 + unsigned long addr;
4682 +
4683 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4684 + regs->u_regs[UREG_G1] <<= 32;
4685 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4686 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4687 + regs->tpc = addr;
4688 + regs->tnpc = addr+4;
4689 + return 2;
4690 + }
4691 + } while (0);
4692 +
4693 + do { /* PaX: unpatched PLT emulation step 1 */
4694 + unsigned int sethi, ba, nop;
4695 +
4696 + err = get_user(sethi, (unsigned int *)regs->tpc);
4697 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4698 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4699 +
4700 + if (err)
4701 + break;
4702 +
4703 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4704 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4705 + nop == 0x01000000U)
4706 + {
4707 + unsigned long addr;
4708 + unsigned int save, call;
4709 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4710 +
4711 + if ((ba & 0xFFC00000U) == 0x30800000U)
4712 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4713 + else
4714 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4715 +
4716 + if (test_thread_flag(TIF_32BIT))
4717 + addr &= 0xFFFFFFFFUL;
4718 +
4719 + err = get_user(save, (unsigned int *)addr);
4720 + err |= get_user(call, (unsigned int *)(addr+4));
4721 + err |= get_user(nop, (unsigned int *)(addr+8));
4722 + if (err)
4723 + break;
4724 +
4725 +#ifdef CONFIG_PAX_DLRESOLVE
4726 + if (save == 0x9DE3BFA8U &&
4727 + (call & 0xC0000000U) == 0x40000000U &&
4728 + nop == 0x01000000U)
4729 + {
4730 + struct vm_area_struct *vma;
4731 + unsigned long call_dl_resolve;
4732 +
4733 + down_read(&current->mm->mmap_sem);
4734 + call_dl_resolve = current->mm->call_dl_resolve;
4735 + up_read(&current->mm->mmap_sem);
4736 + if (likely(call_dl_resolve))
4737 + goto emulate;
4738 +
4739 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4740 +
4741 + down_write(&current->mm->mmap_sem);
4742 + if (current->mm->call_dl_resolve) {
4743 + call_dl_resolve = current->mm->call_dl_resolve;
4744 + up_write(&current->mm->mmap_sem);
4745 + if (vma)
4746 + kmem_cache_free(vm_area_cachep, vma);
4747 + goto emulate;
4748 + }
4749 +
4750 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4751 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4752 + up_write(&current->mm->mmap_sem);
4753 + if (vma)
4754 + kmem_cache_free(vm_area_cachep, vma);
4755 + return 1;
4756 + }
4757 +
4758 + if (pax_insert_vma(vma, call_dl_resolve)) {
4759 + up_write(&current->mm->mmap_sem);
4760 + kmem_cache_free(vm_area_cachep, vma);
4761 + return 1;
4762 + }
4763 +
4764 + current->mm->call_dl_resolve = call_dl_resolve;
4765 + up_write(&current->mm->mmap_sem);
4766 +
4767 +emulate:
4768 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4769 + regs->tpc = call_dl_resolve;
4770 + regs->tnpc = addr+4;
4771 + return 3;
4772 + }
4773 +#endif
4774 +
4775 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4776 + if ((save & 0xFFC00000U) == 0x05000000U &&
4777 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4778 + nop == 0x01000000U)
4779 + {
4780 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4781 + regs->u_regs[UREG_G2] = addr + 4;
4782 + addr = (save & 0x003FFFFFU) << 10;
4783 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4784 +
4785 + if (test_thread_flag(TIF_32BIT))
4786 + addr &= 0xFFFFFFFFUL;
4787 +
4788 + regs->tpc = addr;
4789 + regs->tnpc = addr+4;
4790 + return 3;
4791 + }
4792 +
4793 + /* PaX: 64-bit PLT stub */
4794 + err = get_user(sethi1, (unsigned int *)addr);
4795 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4796 + err |= get_user(or1, (unsigned int *)(addr+8));
4797 + err |= get_user(or2, (unsigned int *)(addr+12));
4798 + err |= get_user(sllx, (unsigned int *)(addr+16));
4799 + err |= get_user(add, (unsigned int *)(addr+20));
4800 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4801 + err |= get_user(nop, (unsigned int *)(addr+28));
4802 + if (err)
4803 + break;
4804 +
4805 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4806 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4807 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4808 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4809 + sllx == 0x89293020U &&
4810 + add == 0x8A010005U &&
4811 + jmpl == 0x89C14000U &&
4812 + nop == 0x01000000U)
4813 + {
4814 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4815 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4816 + regs->u_regs[UREG_G4] <<= 32;
4817 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4818 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4819 + regs->u_regs[UREG_G4] = addr + 24;
4820 + addr = regs->u_regs[UREG_G5];
4821 + regs->tpc = addr;
4822 + regs->tnpc = addr+4;
4823 + return 3;
4824 + }
4825 + }
4826 + } while (0);
4827 +
4828 +#ifdef CONFIG_PAX_DLRESOLVE
4829 + do { /* PaX: unpatched PLT emulation step 2 */
4830 + unsigned int save, call, nop;
4831 +
4832 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4833 + err |= get_user(call, (unsigned int *)regs->tpc);
4834 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4835 + if (err)
4836 + break;
4837 +
4838 + if (save == 0x9DE3BFA8U &&
4839 + (call & 0xC0000000U) == 0x40000000U &&
4840 + nop == 0x01000000U)
4841 + {
4842 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4843 +
4844 + if (test_thread_flag(TIF_32BIT))
4845 + dl_resolve &= 0xFFFFFFFFUL;
4846 +
4847 + regs->u_regs[UREG_RETPC] = regs->tpc;
4848 + regs->tpc = dl_resolve;
4849 + regs->tnpc = dl_resolve+4;
4850 + return 3;
4851 + }
4852 + } while (0);
4853 +#endif
4854 +
4855 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4856 + unsigned int sethi, ba, nop;
4857 +
4858 + err = get_user(sethi, (unsigned int *)regs->tpc);
4859 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4860 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4861 +
4862 + if (err)
4863 + break;
4864 +
4865 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4866 + (ba & 0xFFF00000U) == 0x30600000U &&
4867 + nop == 0x01000000U)
4868 + {
4869 + unsigned long addr;
4870 +
4871 + addr = (sethi & 0x003FFFFFU) << 10;
4872 + regs->u_regs[UREG_G1] = addr;
4873 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4874 +
4875 + if (test_thread_flag(TIF_32BIT))
4876 + addr &= 0xFFFFFFFFUL;
4877 +
4878 + regs->tpc = addr;
4879 + regs->tnpc = addr+4;
4880 + return 2;
4881 + }
4882 + } while (0);
4883 +
4884 +#endif
4885 +
4886 + return 1;
4887 +}
4888 +
4889 +void pax_report_insns(void *pc, void *sp)
4890 +{
4891 + unsigned long i;
4892 +
4893 + printk(KERN_ERR "PAX: bytes at PC: ");
4894 + for (i = 0; i < 8; i++) {
4895 + unsigned int c;
4896 + if (get_user(c, (unsigned int *)pc+i))
4897 + printk(KERN_CONT "???????? ");
4898 + else
4899 + printk(KERN_CONT "%08x ", c);
4900 + }
4901 + printk("\n");
4902 +}
4903 +#endif
4904 +
4905 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4906 {
4907 struct mm_struct *mm = current->mm;
4908 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4909 if (!vma)
4910 goto bad_area;
4911
4912 +#ifdef CONFIG_PAX_PAGEEXEC
4913 + /* PaX: detect ITLB misses on non-exec pages */
4914 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4915 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4916 + {
4917 + if (address != regs->tpc)
4918 + goto good_area;
4919 +
4920 + up_read(&mm->mmap_sem);
4921 + switch (pax_handle_fetch_fault(regs)) {
4922 +
4923 +#ifdef CONFIG_PAX_EMUPLT
4924 + case 2:
4925 + case 3:
4926 + return;
4927 +#endif
4928 +
4929 + }
4930 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4931 + do_group_exit(SIGKILL);
4932 + }
4933 +#endif
4934 +
4935 /* Pure DTLB misses do not tell us whether the fault causing
4936 * load/store/atomic was a write or not, it only says that there
4937 * was no match. So in such a case we (carefully) read the
4938 diff -urNp linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c
4939 --- linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
4940 +++ linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
4941 @@ -68,7 +68,7 @@ full_search:
4942 }
4943 return -ENOMEM;
4944 }
4945 - if (likely(!vma || addr + len <= vma->vm_start)) {
4946 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4947 /*
4948 * Remember the place where we stopped the search:
4949 */
4950 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4951 /* make sure it can fit in the remaining address space */
4952 if (likely(addr > len)) {
4953 vma = find_vma(mm, addr-len);
4954 - if (!vma || addr <= vma->vm_start) {
4955 + if (check_heap_stack_gap(vma, addr - len, len)) {
4956 /* remember the address as a hint for next time */
4957 return (mm->free_area_cache = addr-len);
4958 }
4959 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4960 if (unlikely(mm->mmap_base < len))
4961 goto bottomup;
4962
4963 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4964 + addr = mm->mmap_base - len;
4965
4966 do {
4967 + addr &= HPAGE_MASK;
4968 /*
4969 * Lookup failure means no vma is above this address,
4970 * else if new region fits below vma->vm_start,
4971 * return with success:
4972 */
4973 vma = find_vma(mm, addr);
4974 - if (likely(!vma || addr+len <= vma->vm_start)) {
4975 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4976 /* remember the address as a hint for next time */
4977 return (mm->free_area_cache = addr);
4978 }
4979 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4980 mm->cached_hole_size = vma->vm_start - addr;
4981
4982 /* try just below the current vma->vm_start */
4983 - addr = (vma->vm_start-len) & HPAGE_MASK;
4984 - } while (likely(len < vma->vm_start));
4985 + addr = skip_heap_stack_gap(vma, len);
4986 + } while (!IS_ERR_VALUE(addr));
4987
4988 bottomup:
4989 /*
4990 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4991 if (addr) {
4992 addr = ALIGN(addr, HPAGE_SIZE);
4993 vma = find_vma(mm, addr);
4994 - if (task_size - len >= addr &&
4995 - (!vma || addr + len <= vma->vm_start))
4996 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4997 return addr;
4998 }
4999 if (mm->get_unmapped_area == arch_get_unmapped_area)
5000 diff -urNp linux-2.6.39.4/arch/sparc/mm/init_32.c linux-2.6.39.4/arch/sparc/mm/init_32.c
5001 --- linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
5002 +++ linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-08-05 19:44:33.000000000 -0400
5003 @@ -318,6 +318,9 @@ extern void device_scan(void);
5004 pgprot_t PAGE_SHARED __read_mostly;
5005 EXPORT_SYMBOL(PAGE_SHARED);
5006
5007 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5008 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5009 +
5010 void __init paging_init(void)
5011 {
5012 switch(sparc_cpu_model) {
5013 @@ -346,17 +349,17 @@ void __init paging_init(void)
5014
5015 /* Initialize the protection map with non-constant, MMU dependent values. */
5016 protection_map[0] = PAGE_NONE;
5017 - protection_map[1] = PAGE_READONLY;
5018 - protection_map[2] = PAGE_COPY;
5019 - protection_map[3] = PAGE_COPY;
5020 + protection_map[1] = PAGE_READONLY_NOEXEC;
5021 + protection_map[2] = PAGE_COPY_NOEXEC;
5022 + protection_map[3] = PAGE_COPY_NOEXEC;
5023 protection_map[4] = PAGE_READONLY;
5024 protection_map[5] = PAGE_READONLY;
5025 protection_map[6] = PAGE_COPY;
5026 protection_map[7] = PAGE_COPY;
5027 protection_map[8] = PAGE_NONE;
5028 - protection_map[9] = PAGE_READONLY;
5029 - protection_map[10] = PAGE_SHARED;
5030 - protection_map[11] = PAGE_SHARED;
5031 + protection_map[9] = PAGE_READONLY_NOEXEC;
5032 + protection_map[10] = PAGE_SHARED_NOEXEC;
5033 + protection_map[11] = PAGE_SHARED_NOEXEC;
5034 protection_map[12] = PAGE_READONLY;
5035 protection_map[13] = PAGE_READONLY;
5036 protection_map[14] = PAGE_SHARED;
5037 diff -urNp linux-2.6.39.4/arch/sparc/mm/Makefile linux-2.6.39.4/arch/sparc/mm/Makefile
5038 --- linux-2.6.39.4/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400
5039 +++ linux-2.6.39.4/arch/sparc/mm/Makefile 2011-08-05 19:44:33.000000000 -0400
5040 @@ -2,7 +2,7 @@
5041 #
5042
5043 asflags-y := -ansi
5044 -ccflags-y := -Werror
5045 +#ccflags-y := -Werror
5046
5047 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5048 obj-y += fault_$(BITS).o
5049 diff -urNp linux-2.6.39.4/arch/sparc/mm/srmmu.c linux-2.6.39.4/arch/sparc/mm/srmmu.c
5050 --- linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400
5051 +++ linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-08-05 19:44:33.000000000 -0400
5052 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5053 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5054 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5055 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5056 +
5057 +#ifdef CONFIG_PAX_PAGEEXEC
5058 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5059 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5060 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5061 +#endif
5062 +
5063 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5064 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5065
5066 diff -urNp linux-2.6.39.4/arch/um/include/asm/kmap_types.h linux-2.6.39.4/arch/um/include/asm/kmap_types.h
5067 --- linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
5068 +++ linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
5069 @@ -23,6 +23,7 @@ enum km_type {
5070 KM_IRQ1,
5071 KM_SOFTIRQ0,
5072 KM_SOFTIRQ1,
5073 + KM_CLEARPAGE,
5074 KM_TYPE_NR
5075 };
5076
5077 diff -urNp linux-2.6.39.4/arch/um/include/asm/page.h linux-2.6.39.4/arch/um/include/asm/page.h
5078 --- linux-2.6.39.4/arch/um/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
5079 +++ linux-2.6.39.4/arch/um/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
5080 @@ -14,6 +14,9 @@
5081 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5082 #define PAGE_MASK (~(PAGE_SIZE-1))
5083
5084 +#define ktla_ktva(addr) (addr)
5085 +#define ktva_ktla(addr) (addr)
5086 +
5087 #ifndef __ASSEMBLY__
5088
5089 struct page;
5090 diff -urNp linux-2.6.39.4/arch/um/kernel/process.c linux-2.6.39.4/arch/um/kernel/process.c
5091 --- linux-2.6.39.4/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
5092 +++ linux-2.6.39.4/arch/um/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
5093 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5094 return 2;
5095 }
5096
5097 -/*
5098 - * Only x86 and x86_64 have an arch_align_stack().
5099 - * All other arches have "#define arch_align_stack(x) (x)"
5100 - * in their asm/system.h
5101 - * As this is included in UML from asm-um/system-generic.h,
5102 - * we can use it to behave as the subarch does.
5103 - */
5104 -#ifndef arch_align_stack
5105 -unsigned long arch_align_stack(unsigned long sp)
5106 -{
5107 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5108 - sp -= get_random_int() % 8192;
5109 - return sp & ~0xf;
5110 -}
5111 -#endif
5112 -
5113 unsigned long get_wchan(struct task_struct *p)
5114 {
5115 unsigned long stack_page, sp, ip;
5116 diff -urNp linux-2.6.39.4/arch/um/sys-i386/syscalls.c linux-2.6.39.4/arch/um/sys-i386/syscalls.c
5117 --- linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400
5118 +++ linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-08-05 19:44:33.000000000 -0400
5119 @@ -11,6 +11,21 @@
5120 #include "asm/uaccess.h"
5121 #include "asm/unistd.h"
5122
5123 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5124 +{
5125 + unsigned long pax_task_size = TASK_SIZE;
5126 +
5127 +#ifdef CONFIG_PAX_SEGMEXEC
5128 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5129 + pax_task_size = SEGMEXEC_TASK_SIZE;
5130 +#endif
5131 +
5132 + if (len > pax_task_size || addr > pax_task_size - len)
5133 + return -EINVAL;
5134 +
5135 + return 0;
5136 +}
5137 +
5138 /*
5139 * The prototype on i386 is:
5140 *
5141 diff -urNp linux-2.6.39.4/arch/x86/boot/bitops.h linux-2.6.39.4/arch/x86/boot/bitops.h
5142 --- linux-2.6.39.4/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400
5143 +++ linux-2.6.39.4/arch/x86/boot/bitops.h 2011-08-05 19:44:33.000000000 -0400
5144 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5145 u8 v;
5146 const u32 *p = (const u32 *)addr;
5147
5148 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5149 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5150 return v;
5151 }
5152
5153 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5154
5155 static inline void set_bit(int nr, void *addr)
5156 {
5157 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5158 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5159 }
5160
5161 #endif /* BOOT_BITOPS_H */
5162 diff -urNp linux-2.6.39.4/arch/x86/boot/boot.h linux-2.6.39.4/arch/x86/boot/boot.h
5163 --- linux-2.6.39.4/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400
5164 +++ linux-2.6.39.4/arch/x86/boot/boot.h 2011-08-05 19:44:33.000000000 -0400
5165 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5166 static inline u16 ds(void)
5167 {
5168 u16 seg;
5169 - asm("movw %%ds,%0" : "=rm" (seg));
5170 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5171 return seg;
5172 }
5173
5174 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5175 static inline int memcmp(const void *s1, const void *s2, size_t len)
5176 {
5177 u8 diff;
5178 - asm("repe; cmpsb; setnz %0"
5179 + asm volatile("repe; cmpsb; setnz %0"
5180 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5181 return diff;
5182 }
5183 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_32.S linux-2.6.39.4/arch/x86/boot/compressed/head_32.S
5184 --- linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400
5185 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-08-05 19:44:33.000000000 -0400
5186 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5187 notl %eax
5188 andl %eax, %ebx
5189 #else
5190 - movl $LOAD_PHYSICAL_ADDR, %ebx
5191 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5192 #endif
5193
5194 /* Target address to relocate to for decompression */
5195 @@ -162,7 +162,7 @@ relocated:
5196 * and where it was actually loaded.
5197 */
5198 movl %ebp, %ebx
5199 - subl $LOAD_PHYSICAL_ADDR, %ebx
5200 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5201 jz 2f /* Nothing to be done if loaded at compiled addr. */
5202 /*
5203 * Process relocations.
5204 @@ -170,8 +170,7 @@ relocated:
5205
5206 1: subl $4, %edi
5207 movl (%edi), %ecx
5208 - testl %ecx, %ecx
5209 - jz 2f
5210 + jecxz 2f
5211 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5212 jmp 1b
5213 2:
5214 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_64.S linux-2.6.39.4/arch/x86/boot/compressed/head_64.S
5215 --- linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400
5216 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-08-05 19:44:33.000000000 -0400
5217 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5218 notl %eax
5219 andl %eax, %ebx
5220 #else
5221 - movl $LOAD_PHYSICAL_ADDR, %ebx
5222 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5223 #endif
5224
5225 /* Target address to relocate to for decompression */
5226 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5227 notq %rax
5228 andq %rax, %rbp
5229 #else
5230 - movq $LOAD_PHYSICAL_ADDR, %rbp
5231 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5232 #endif
5233
5234 /* Target address to relocate to for decompression */
5235 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/Makefile linux-2.6.39.4/arch/x86/boot/compressed/Makefile
5236 --- linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-05-19 00:06:34.000000000 -0400
5237 +++ linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-08-05 20:34:06.000000000 -0400
5238 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5239 KBUILD_CFLAGS += $(cflags-y)
5240 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5241 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5242 +ifdef CONSTIFY_PLUGIN
5243 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5244 +endif
5245
5246 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5247 GCOV_PROFILE := n
5248 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/misc.c linux-2.6.39.4/arch/x86/boot/compressed/misc.c
5249 --- linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400
5250 +++ linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-08-05 19:44:33.000000000 -0400
5251 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5252 case PT_LOAD:
5253 #ifdef CONFIG_RELOCATABLE
5254 dest = output;
5255 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5256 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5257 #else
5258 dest = (void *)(phdr->p_paddr);
5259 #endif
5260 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5261 error("Destination address too large");
5262 #endif
5263 #ifndef CONFIG_RELOCATABLE
5264 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5265 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5266 error("Wrong destination address");
5267 #endif
5268
5269 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/relocs.c linux-2.6.39.4/arch/x86/boot/compressed/relocs.c
5270 --- linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400
5271 +++ linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-08-05 19:44:33.000000000 -0400
5272 @@ -13,8 +13,11 @@
5273
5274 static void die(char *fmt, ...);
5275
5276 +#include "../../../../include/generated/autoconf.h"
5277 +
5278 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5279 static Elf32_Ehdr ehdr;
5280 +static Elf32_Phdr *phdr;
5281 static unsigned long reloc_count, reloc_idx;
5282 static unsigned long *relocs;
5283
5284 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5285 }
5286 }
5287
5288 +static void read_phdrs(FILE *fp)
5289 +{
5290 + unsigned int i;
5291 +
5292 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5293 + if (!phdr) {
5294 + die("Unable to allocate %d program headers\n",
5295 + ehdr.e_phnum);
5296 + }
5297 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5298 + die("Seek to %d failed: %s\n",
5299 + ehdr.e_phoff, strerror(errno));
5300 + }
5301 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5302 + die("Cannot read ELF program headers: %s\n",
5303 + strerror(errno));
5304 + }
5305 + for(i = 0; i < ehdr.e_phnum; i++) {
5306 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5307 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5308 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5309 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5310 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5311 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5312 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5313 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5314 + }
5315 +
5316 +}
5317 +
5318 static void read_shdrs(FILE *fp)
5319 {
5320 - int i;
5321 + unsigned int i;
5322 Elf32_Shdr shdr;
5323
5324 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5325 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5326
5327 static void read_strtabs(FILE *fp)
5328 {
5329 - int i;
5330 + unsigned int i;
5331 for (i = 0; i < ehdr.e_shnum; i++) {
5332 struct section *sec = &secs[i];
5333 if (sec->shdr.sh_type != SHT_STRTAB) {
5334 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5335
5336 static void read_symtabs(FILE *fp)
5337 {
5338 - int i,j;
5339 + unsigned int i,j;
5340 for (i = 0; i < ehdr.e_shnum; i++) {
5341 struct section *sec = &secs[i];
5342 if (sec->shdr.sh_type != SHT_SYMTAB) {
5343 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5344
5345 static void read_relocs(FILE *fp)
5346 {
5347 - int i,j;
5348 + unsigned int i,j;
5349 + uint32_t base;
5350 +
5351 for (i = 0; i < ehdr.e_shnum; i++) {
5352 struct section *sec = &secs[i];
5353 if (sec->shdr.sh_type != SHT_REL) {
5354 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5355 die("Cannot read symbol table: %s\n",
5356 strerror(errno));
5357 }
5358 + base = 0;
5359 + for (j = 0; j < ehdr.e_phnum; j++) {
5360 + if (phdr[j].p_type != PT_LOAD )
5361 + continue;
5362 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5363 + continue;
5364 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5365 + break;
5366 + }
5367 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5368 Elf32_Rel *rel = &sec->reltab[j];
5369 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5370 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5371 rel->r_info = elf32_to_cpu(rel->r_info);
5372 }
5373 }
5374 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5375
5376 static void print_absolute_symbols(void)
5377 {
5378 - int i;
5379 + unsigned int i;
5380 printf("Absolute symbols\n");
5381 printf(" Num: Value Size Type Bind Visibility Name\n");
5382 for (i = 0; i < ehdr.e_shnum; i++) {
5383 struct section *sec = &secs[i];
5384 char *sym_strtab;
5385 Elf32_Sym *sh_symtab;
5386 - int j;
5387 + unsigned int j;
5388
5389 if (sec->shdr.sh_type != SHT_SYMTAB) {
5390 continue;
5391 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5392
5393 static void print_absolute_relocs(void)
5394 {
5395 - int i, printed = 0;
5396 + unsigned int i, printed = 0;
5397
5398 for (i = 0; i < ehdr.e_shnum; i++) {
5399 struct section *sec = &secs[i];
5400 struct section *sec_applies, *sec_symtab;
5401 char *sym_strtab;
5402 Elf32_Sym *sh_symtab;
5403 - int j;
5404 + unsigned int j;
5405 if (sec->shdr.sh_type != SHT_REL) {
5406 continue;
5407 }
5408 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5409
5410 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5411 {
5412 - int i;
5413 + unsigned int i;
5414 /* Walk through the relocations */
5415 for (i = 0; i < ehdr.e_shnum; i++) {
5416 char *sym_strtab;
5417 Elf32_Sym *sh_symtab;
5418 struct section *sec_applies, *sec_symtab;
5419 - int j;
5420 + unsigned int j;
5421 struct section *sec = &secs[i];
5422
5423 if (sec->shdr.sh_type != SHT_REL) {
5424 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5425 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5426 continue;
5427 }
5428 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5429 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5430 + continue;
5431 +
5432 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5433 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5434 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5435 + continue;
5436 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5437 + continue;
5438 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5439 + continue;
5440 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5441 + continue;
5442 +#endif
5443 +
5444 switch (r_type) {
5445 case R_386_NONE:
5446 case R_386_PC32:
5447 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5448
5449 static void emit_relocs(int as_text)
5450 {
5451 - int i;
5452 + unsigned int i;
5453 /* Count how many relocations I have and allocate space for them. */
5454 reloc_count = 0;
5455 walk_relocs(count_reloc);
5456 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5457 fname, strerror(errno));
5458 }
5459 read_ehdr(fp);
5460 + read_phdrs(fp);
5461 read_shdrs(fp);
5462 read_strtabs(fp);
5463 read_symtabs(fp);
5464 diff -urNp linux-2.6.39.4/arch/x86/boot/cpucheck.c linux-2.6.39.4/arch/x86/boot/cpucheck.c
5465 --- linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400
5466 +++ linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-08-05 19:44:33.000000000 -0400
5467 @@ -74,7 +74,7 @@ static int has_fpu(void)
5468 u16 fcw = -1, fsw = -1;
5469 u32 cr0;
5470
5471 - asm("movl %%cr0,%0" : "=r" (cr0));
5472 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5473 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5474 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5475 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5476 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5477 {
5478 u32 f0, f1;
5479
5480 - asm("pushfl ; "
5481 + asm volatile("pushfl ; "
5482 "pushfl ; "
5483 "popl %0 ; "
5484 "movl %0,%1 ; "
5485 @@ -115,7 +115,7 @@ static void get_flags(void)
5486 set_bit(X86_FEATURE_FPU, cpu.flags);
5487
5488 if (has_eflag(X86_EFLAGS_ID)) {
5489 - asm("cpuid"
5490 + asm volatile("cpuid"
5491 : "=a" (max_intel_level),
5492 "=b" (cpu_vendor[0]),
5493 "=d" (cpu_vendor[1]),
5494 @@ -124,7 +124,7 @@ static void get_flags(void)
5495
5496 if (max_intel_level >= 0x00000001 &&
5497 max_intel_level <= 0x0000ffff) {
5498 - asm("cpuid"
5499 + asm volatile("cpuid"
5500 : "=a" (tfms),
5501 "=c" (cpu.flags[4]),
5502 "=d" (cpu.flags[0])
5503 @@ -136,7 +136,7 @@ static void get_flags(void)
5504 cpu.model += ((tfms >> 16) & 0xf) << 4;
5505 }
5506
5507 - asm("cpuid"
5508 + asm volatile("cpuid"
5509 : "=a" (max_amd_level)
5510 : "a" (0x80000000)
5511 : "ebx", "ecx", "edx");
5512 @@ -144,7 +144,7 @@ static void get_flags(void)
5513 if (max_amd_level >= 0x80000001 &&
5514 max_amd_level <= 0x8000ffff) {
5515 u32 eax = 0x80000001;
5516 - asm("cpuid"
5517 + asm volatile("cpuid"
5518 : "+a" (eax),
5519 "=c" (cpu.flags[6]),
5520 "=d" (cpu.flags[1])
5521 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5522 u32 ecx = MSR_K7_HWCR;
5523 u32 eax, edx;
5524
5525 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5526 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5527 eax &= ~(1 << 15);
5528 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5530
5531 get_flags(); /* Make sure it really did something */
5532 err = check_flags();
5533 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5534 u32 ecx = MSR_VIA_FCR;
5535 u32 eax, edx;
5536
5537 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5538 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5539 eax |= (1<<1)|(1<<7);
5540 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5541 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5542
5543 set_bit(X86_FEATURE_CX8, cpu.flags);
5544 err = check_flags();
5545 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5546 u32 eax, edx;
5547 u32 level = 1;
5548
5549 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5550 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5551 - asm("cpuid"
5552 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5553 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5554 + asm volatile("cpuid"
5555 : "+a" (level), "=d" (cpu.flags[0])
5556 : : "ecx", "ebx");
5557 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5558 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5559
5560 err = check_flags();
5561 }
5562 diff -urNp linux-2.6.39.4/arch/x86/boot/header.S linux-2.6.39.4/arch/x86/boot/header.S
5563 --- linux-2.6.39.4/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400
5564 +++ linux-2.6.39.4/arch/x86/boot/header.S 2011-08-05 19:44:33.000000000 -0400
5565 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5566 # single linked list of
5567 # struct setup_data
5568
5569 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5570 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5571
5572 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5573 #define VO_INIT_SIZE (VO__end - VO__text)
5574 diff -urNp linux-2.6.39.4/arch/x86/boot/Makefile linux-2.6.39.4/arch/x86/boot/Makefile
5575 --- linux-2.6.39.4/arch/x86/boot/Makefile 2011-05-19 00:06:34.000000000 -0400
5576 +++ linux-2.6.39.4/arch/x86/boot/Makefile 2011-08-05 20:34:06.000000000 -0400
5577 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5578 $(call cc-option, -fno-stack-protector) \
5579 $(call cc-option, -mpreferred-stack-boundary=2)
5580 KBUILD_CFLAGS += $(call cc-option, -m32)
5581 +ifdef CONSTIFY_PLUGIN
5582 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5583 +endif
5584 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5585 GCOV_PROFILE := n
5586
5587 diff -urNp linux-2.6.39.4/arch/x86/boot/memory.c linux-2.6.39.4/arch/x86/boot/memory.c
5588 --- linux-2.6.39.4/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400
5589 +++ linux-2.6.39.4/arch/x86/boot/memory.c 2011-08-05 19:44:33.000000000 -0400
5590 @@ -19,7 +19,7 @@
5591
5592 static int detect_memory_e820(void)
5593 {
5594 - int count = 0;
5595 + unsigned int count = 0;
5596 struct biosregs ireg, oreg;
5597 struct e820entry *desc = boot_params.e820_map;
5598 static struct e820entry buf; /* static so it is zeroed */
5599 diff -urNp linux-2.6.39.4/arch/x86/boot/video.c linux-2.6.39.4/arch/x86/boot/video.c
5600 --- linux-2.6.39.4/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400
5601 +++ linux-2.6.39.4/arch/x86/boot/video.c 2011-08-05 19:44:33.000000000 -0400
5602 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5603 static unsigned int get_entry(void)
5604 {
5605 char entry_buf[4];
5606 - int i, len = 0;
5607 + unsigned int i, len = 0;
5608 int key;
5609 unsigned int v;
5610
5611 diff -urNp linux-2.6.39.4/arch/x86/boot/video-vesa.c linux-2.6.39.4/arch/x86/boot/video-vesa.c
5612 --- linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400
5613 +++ linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-08-05 19:44:33.000000000 -0400
5614 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5615
5616 boot_params.screen_info.vesapm_seg = oreg.es;
5617 boot_params.screen_info.vesapm_off = oreg.di;
5618 + boot_params.screen_info.vesapm_size = oreg.cx;
5619 }
5620
5621 /*
5622 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_aout.c linux-2.6.39.4/arch/x86/ia32/ia32_aout.c
5623 --- linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400
5624 +++ linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-08-05 19:44:33.000000000 -0400
5625 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5626 unsigned long dump_start, dump_size;
5627 struct user32 dump;
5628
5629 + memset(&dump, 0, sizeof(dump));
5630 +
5631 fs = get_fs();
5632 set_fs(KERNEL_DS);
5633 has_dumped = 1;
5634 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32entry.S linux-2.6.39.4/arch/x86/ia32/ia32entry.S
5635 --- linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400
5636 +++ linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-08-05 19:44:33.000000000 -0400
5637 @@ -13,6 +13,7 @@
5638 #include <asm/thread_info.h>
5639 #include <asm/segment.h>
5640 #include <asm/irqflags.h>
5641 +#include <asm/pgtable.h>
5642 #include <linux/linkage.h>
5643
5644 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5645 @@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5646 ENDPROC(native_irq_enable_sysexit)
5647 #endif
5648
5649 + .macro pax_enter_kernel_user
5650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5651 + call pax_enter_kernel_user
5652 +#endif
5653 + .endm
5654 +
5655 + .macro pax_exit_kernel_user
5656 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5657 + call pax_exit_kernel_user
5658 +#endif
5659 +#ifdef CONFIG_PAX_RANDKSTACK
5660 + pushq %rax
5661 + call pax_randomize_kstack
5662 + popq %rax
5663 +#endif
5664 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5665 + call pax_erase_kstack
5666 +#endif
5667 + .endm
5668 +
5669 + .macro pax_erase_kstack
5670 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5671 + call pax_erase_kstack
5672 +#endif
5673 + .endm
5674 +
5675 /*
5676 * 32bit SYSENTER instruction entry.
5677 *
5678 @@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5679 CFI_REGISTER rsp,rbp
5680 SWAPGS_UNSAFE_STACK
5681 movq PER_CPU_VAR(kernel_stack), %rsp
5682 - addq $(KERNEL_STACK_OFFSET),%rsp
5683 + pax_enter_kernel_user
5684 /*
5685 * No need to follow this irqs on/off section: the syscall
5686 * disabled irqs, here we enable it straight after entry:
5687 @@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5688 CFI_REL_OFFSET rsp,0
5689 pushfq_cfi
5690 /*CFI_REL_OFFSET rflags,0*/
5691 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5692 + GET_THREAD_INFO(%r10)
5693 + movl TI_sysenter_return(%r10), %r10d
5694 CFI_REGISTER rip,r10
5695 pushq_cfi $__USER32_CS
5696 /*CFI_REL_OFFSET cs,0*/
5697 @@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5698 SAVE_ARGS 0,0,1
5699 /* no need to do an access_ok check here because rbp has been
5700 32bit zero extended */
5701 +
5702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5703 + mov $PAX_USER_SHADOW_BASE,%r10
5704 + add %r10,%rbp
5705 +#endif
5706 +
5707 1: movl (%rbp),%ebp
5708 .section __ex_table,"a"
5709 .quad 1b,ia32_badarg
5710 @@ -168,6 +202,7 @@ sysenter_dispatch:
5711 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5712 jnz sysexit_audit
5713 sysexit_from_sys_call:
5714 + pax_exit_kernel_user
5715 andl $~TS_COMPAT,TI_status(%r10)
5716 /* clear IF, that popfq doesn't enable interrupts early */
5717 andl $~0x200,EFLAGS-R11(%rsp)
5718 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
5719 movl %eax,%esi /* 2nd arg: syscall number */
5720 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5721 call audit_syscall_entry
5722 +
5723 + pax_erase_kstack
5724 +
5725 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5726 cmpq $(IA32_NR_syscalls-1),%rax
5727 ja ia32_badsys
5728 @@ -246,6 +284,9 @@ sysenter_tracesys:
5729 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5730 movq %rsp,%rdi /* &pt_regs -> arg1 */
5731 call syscall_trace_enter
5732 +
5733 + pax_erase_kstack
5734 +
5735 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5736 RESTORE_REST
5737 cmpq $(IA32_NR_syscalls-1),%rax
5738 @@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5739 ENTRY(ia32_cstar_target)
5740 CFI_STARTPROC32 simple
5741 CFI_SIGNAL_FRAME
5742 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5743 + CFI_DEF_CFA rsp,0
5744 CFI_REGISTER rip,rcx
5745 /*CFI_REGISTER rflags,r11*/
5746 SWAPGS_UNSAFE_STACK
5747 movl %esp,%r8d
5748 CFI_REGISTER rsp,r8
5749 movq PER_CPU_VAR(kernel_stack),%rsp
5750 +
5751 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5752 + pax_enter_kernel_user
5753 +#endif
5754 +
5755 /*
5756 * No need to follow this irqs on/off section: the syscall
5757 * disabled irqs and here we enable it straight after entry:
5758 */
5759 ENABLE_INTERRUPTS(CLBR_NONE)
5760 - SAVE_ARGS 8,1,1
5761 + SAVE_ARGS 8*6,1,1
5762 movl %eax,%eax /* zero extension */
5763 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5764 movq %rcx,RIP-ARGOFFSET(%rsp)
5765 @@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5766 /* no need to do an access_ok check here because r8 has been
5767 32bit zero extended */
5768 /* hardware stack frame is complete now */
5769 +
5770 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5771 + mov $PAX_USER_SHADOW_BASE,%r10
5772 + add %r10,%r8
5773 +#endif
5774 +
5775 1: movl (%r8),%r9d
5776 .section __ex_table,"a"
5777 .quad 1b,ia32_badarg
5778 @@ -327,6 +379,7 @@ cstar_dispatch:
5779 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5780 jnz sysretl_audit
5781 sysretl_from_sys_call:
5782 + pax_exit_kernel_user
5783 andl $~TS_COMPAT,TI_status(%r10)
5784 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5785 movl RIP-ARGOFFSET(%rsp),%ecx
5786 @@ -364,6 +417,9 @@ cstar_tracesys:
5787 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5788 movq %rsp,%rdi /* &pt_regs -> arg1 */
5789 call syscall_trace_enter
5790 +
5791 + pax_erase_kstack
5792 +
5793 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5794 RESTORE_REST
5795 xchgl %ebp,%r9d
5796 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5797 CFI_REL_OFFSET rip,RIP-RIP
5798 PARAVIRT_ADJUST_EXCEPTION_FRAME
5799 SWAPGS
5800 + pax_enter_kernel_user
5801 /*
5802 * No need to follow this irqs on/off section: the syscall
5803 * disabled irqs and here we enable it straight after entry:
5804 @@ -441,6 +498,9 @@ ia32_tracesys:
5805 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5806 movq %rsp,%rdi /* &pt_regs -> arg1 */
5807 call syscall_trace_enter
5808 +
5809 + pax_erase_kstack
5810 +
5811 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5812 RESTORE_REST
5813 cmpq $(IA32_NR_syscalls-1),%rax
5814 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_signal.c linux-2.6.39.4/arch/x86/ia32/ia32_signal.c
5815 --- linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400
5816 +++ linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-08-05 19:44:33.000000000 -0400
5817 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5818 sp -= frame_size;
5819 /* Align the stack pointer according to the i386 ABI,
5820 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5821 - sp = ((sp + 4) & -16ul) - 4;
5822 + sp = ((sp - 12) & -16ul) - 4;
5823 return (void __user *) sp;
5824 }
5825
5826 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5827 * These are actually not used anymore, but left because some
5828 * gdb versions depend on them as a marker.
5829 */
5830 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5836 0xb8,
5837 __NR_ia32_rt_sigreturn,
5838 0x80cd,
5839 - 0,
5840 + 0
5841 };
5842
5843 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5844 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5845
5846 if (ka->sa.sa_flags & SA_RESTORER)
5847 restorer = ka->sa.sa_restorer;
5848 + else if (current->mm->context.vdso)
5849 + /* Return stub is in 32bit vsyscall page */
5850 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5851 else
5852 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5853 - rt_sigreturn);
5854 + restorer = &frame->retcode;
5855 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5856
5857 /*
5858 * Not actually used anymore, but left because some gdb
5859 * versions need it.
5860 */
5861 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5862 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5863 } put_user_catch(err);
5864
5865 if (err)
5866 diff -urNp linux-2.6.39.4/arch/x86/include/asm/alternative.h linux-2.6.39.4/arch/x86/include/asm/alternative.h
5867 --- linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400
5868 +++ linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-08-05 19:44:33.000000000 -0400
5869 @@ -94,7 +94,7 @@ static inline int alternatives_text_rese
5870 ".section .discard,\"aw\",@progbits\n" \
5871 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5872 ".previous\n" \
5873 - ".section .altinstr_replacement, \"ax\"\n" \
5874 + ".section .altinstr_replacement, \"a\"\n" \
5875 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5876 ".previous"
5877
5878 diff -urNp linux-2.6.39.4/arch/x86/include/asm/apic.h linux-2.6.39.4/arch/x86/include/asm/apic.h
5879 --- linux-2.6.39.4/arch/x86/include/asm/apic.h 2011-05-19 00:06:34.000000000 -0400
5880 +++ linux-2.6.39.4/arch/x86/include/asm/apic.h 2011-08-17 20:01:35.000000000 -0400
5881 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5882
5883 #ifdef CONFIG_X86_LOCAL_APIC
5884
5885 -extern unsigned int apic_verbosity;
5886 +extern int apic_verbosity;
5887 extern int local_apic_timer_c2_ok;
5888
5889 extern int disable_apic;
5890 diff -urNp linux-2.6.39.4/arch/x86/include/asm/apm.h linux-2.6.39.4/arch/x86/include/asm/apm.h
5891 --- linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400
5892 +++ linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-08-05 19:44:33.000000000 -0400
5893 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5894 __asm__ __volatile__(APM_DO_ZERO_SEGS
5895 "pushl %%edi\n\t"
5896 "pushl %%ebp\n\t"
5897 - "lcall *%%cs:apm_bios_entry\n\t"
5898 + "lcall *%%ss:apm_bios_entry\n\t"
5899 "setc %%al\n\t"
5900 "popl %%ebp\n\t"
5901 "popl %%edi\n\t"
5902 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5903 __asm__ __volatile__(APM_DO_ZERO_SEGS
5904 "pushl %%edi\n\t"
5905 "pushl %%ebp\n\t"
5906 - "lcall *%%cs:apm_bios_entry\n\t"
5907 + "lcall *%%ss:apm_bios_entry\n\t"
5908 "setc %%bl\n\t"
5909 "popl %%ebp\n\t"
5910 "popl %%edi\n\t"
5911 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h
5912 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400
5913 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-08-05 19:44:33.000000000 -0400
5914 @@ -12,6 +12,14 @@ typedef struct {
5915 u64 __aligned(8) counter;
5916 } atomic64_t;
5917
5918 +#ifdef CONFIG_PAX_REFCOUNT
5919 +typedef struct {
5920 + u64 __aligned(8) counter;
5921 +} atomic64_unchecked_t;
5922 +#else
5923 +typedef atomic64_t atomic64_unchecked_t;
5924 +#endif
5925 +
5926 #define ATOMIC64_INIT(val) { (val) }
5927
5928 #ifdef CONFIG_X86_CMPXCHG64
5929 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5930 }
5931
5932 /**
5933 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5934 + * @p: pointer to type atomic64_unchecked_t
5935 + * @o: expected value
5936 + * @n: new value
5937 + *
5938 + * Atomically sets @v to @n if it was equal to @o and returns
5939 + * the old value.
5940 + */
5941 +
5942 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5943 +{
5944 + return cmpxchg64(&v->counter, o, n);
5945 +}
5946 +
5947 +/**
5948 * atomic64_xchg - xchg atomic64 variable
5949 * @v: pointer to type atomic64_t
5950 * @n: value to assign
5951 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5952 }
5953
5954 /**
5955 + * atomic64_set_unchecked - set atomic64 variable
5956 + * @v: pointer to type atomic64_unchecked_t
5957 + * @n: value to assign
5958 + *
5959 + * Atomically sets the value of @v to @n.
5960 + */
5961 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5962 +{
5963 + unsigned high = (unsigned)(i >> 32);
5964 + unsigned low = (unsigned)i;
5965 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5966 + : "+b" (low), "+c" (high)
5967 + : "S" (v)
5968 + : "eax", "edx", "memory"
5969 + );
5970 +}
5971 +
5972 +/**
5973 * atomic64_read - read atomic64 variable
5974 * @v: pointer to type atomic64_t
5975 *
5976 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5977 }
5978
5979 /**
5980 + * atomic64_read_unchecked - read atomic64 variable
5981 + * @v: pointer to type atomic64_unchecked_t
5982 + *
5983 + * Atomically reads the value of @v and returns it.
5984 + */
5985 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5986 +{
5987 + long long r;
5988 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5989 + : "=A" (r), "+c" (v)
5990 + : : "memory"
5991 + );
5992 + return r;
5993 + }
5994 +
5995 +/**
5996 * atomic64_add_return - add and return
5997 * @i: integer value to add
5998 * @v: pointer to type atomic64_t
5999 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6000 return i;
6001 }
6002
6003 +/**
6004 + * atomic64_add_return_unchecked - add and return
6005 + * @i: integer value to add
6006 + * @v: pointer to type atomic64_unchecked_t
6007 + *
6008 + * Atomically adds @i to @v and returns @i + *@v
6009 + */
6010 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6011 +{
6012 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6013 + : "+A" (i), "+c" (v)
6014 + : : "memory"
6015 + );
6016 + return i;
6017 +}
6018 +
6019 /*
6020 * Other variants with different arithmetic operators:
6021 */
6022 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6023 return a;
6024 }
6025
6026 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6027 +{
6028 + long long a;
6029 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6030 + : "=A" (a)
6031 + : "S" (v)
6032 + : "memory", "ecx"
6033 + );
6034 + return a;
6035 +}
6036 +
6037 static inline long long atomic64_dec_return(atomic64_t *v)
6038 {
6039 long long a;
6040 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6041 }
6042
6043 /**
6044 + * atomic64_add_unchecked - add integer to atomic64 variable
6045 + * @i: integer value to add
6046 + * @v: pointer to type atomic64_unchecked_t
6047 + *
6048 + * Atomically adds @i to @v.
6049 + */
6050 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6051 +{
6052 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6053 + : "+A" (i), "+c" (v)
6054 + : : "memory"
6055 + );
6056 + return i;
6057 +}
6058 +
6059 +/**
6060 * atomic64_sub - subtract the atomic64 variable
6061 * @i: integer value to subtract
6062 * @v: pointer to type atomic64_t
6063 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h
6064 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400
6065 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-08-05 19:44:33.000000000 -0400
6066 @@ -18,7 +18,19 @@
6067 */
6068 static inline long atomic64_read(const atomic64_t *v)
6069 {
6070 - return (*(volatile long *)&(v)->counter);
6071 + return (*(volatile const long *)&(v)->counter);
6072 +}
6073 +
6074 +/**
6075 + * atomic64_read_unchecked - read atomic64 variable
6076 + * @v: pointer of type atomic64_unchecked_t
6077 + *
6078 + * Atomically reads the value of @v.
6079 + * Doesn't imply a read memory barrier.
6080 + */
6081 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6082 +{
6083 + return (*(volatile const long *)&(v)->counter);
6084 }
6085
6086 /**
6087 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6088 }
6089
6090 /**
6091 + * atomic64_set_unchecked - set atomic64 variable
6092 + * @v: pointer to type atomic64_unchecked_t
6093 + * @i: required value
6094 + *
6095 + * Atomically sets the value of @v to @i.
6096 + */
6097 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6098 +{
6099 + v->counter = i;
6100 +}
6101 +
6102 +/**
6103 * atomic64_add - add integer to atomic64 variable
6104 * @i: integer value to add
6105 * @v: pointer to type atomic64_t
6106 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6107 */
6108 static inline void atomic64_add(long i, atomic64_t *v)
6109 {
6110 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6111 +
6112 +#ifdef CONFIG_PAX_REFCOUNT
6113 + "jno 0f\n"
6114 + LOCK_PREFIX "subq %1,%0\n"
6115 + "int $4\n0:\n"
6116 + _ASM_EXTABLE(0b, 0b)
6117 +#endif
6118 +
6119 + : "=m" (v->counter)
6120 + : "er" (i), "m" (v->counter));
6121 +}
6122 +
6123 +/**
6124 + * atomic64_add_unchecked - add integer to atomic64 variable
6125 + * @i: integer value to add
6126 + * @v: pointer to type atomic64_unchecked_t
6127 + *
6128 + * Atomically adds @i to @v.
6129 + */
6130 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6131 +{
6132 asm volatile(LOCK_PREFIX "addq %1,%0"
6133 : "=m" (v->counter)
6134 : "er" (i), "m" (v->counter));
6135 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6136 */
6137 static inline void atomic64_sub(long i, atomic64_t *v)
6138 {
6139 - asm volatile(LOCK_PREFIX "subq %1,%0"
6140 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6141 +
6142 +#ifdef CONFIG_PAX_REFCOUNT
6143 + "jno 0f\n"
6144 + LOCK_PREFIX "addq %1,%0\n"
6145 + "int $4\n0:\n"
6146 + _ASM_EXTABLE(0b, 0b)
6147 +#endif
6148 +
6149 + : "=m" (v->counter)
6150 + : "er" (i), "m" (v->counter));
6151 +}
6152 +
6153 +/**
6154 + * atomic64_sub_unchecked - subtract the atomic64 variable
6155 + * @i: integer value to subtract
6156 + * @v: pointer to type atomic64_unchecked_t
6157 + *
6158 + * Atomically subtracts @i from @v.
6159 + */
6160 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6161 +{
6162 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6163 : "=m" (v->counter)
6164 : "er" (i), "m" (v->counter));
6165 }
6166 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6167 {
6168 unsigned char c;
6169
6170 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6171 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6172 +
6173 +#ifdef CONFIG_PAX_REFCOUNT
6174 + "jno 0f\n"
6175 + LOCK_PREFIX "addq %2,%0\n"
6176 + "int $4\n0:\n"
6177 + _ASM_EXTABLE(0b, 0b)
6178 +#endif
6179 +
6180 + "sete %1\n"
6181 : "=m" (v->counter), "=qm" (c)
6182 : "er" (i), "m" (v->counter) : "memory");
6183 return c;
6184 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6185 */
6186 static inline void atomic64_inc(atomic64_t *v)
6187 {
6188 + asm volatile(LOCK_PREFIX "incq %0\n"
6189 +
6190 +#ifdef CONFIG_PAX_REFCOUNT
6191 + "jno 0f\n"
6192 + LOCK_PREFIX "decq %0\n"
6193 + "int $4\n0:\n"
6194 + _ASM_EXTABLE(0b, 0b)
6195 +#endif
6196 +
6197 + : "=m" (v->counter)
6198 + : "m" (v->counter));
6199 +}
6200 +
6201 +/**
6202 + * atomic64_inc_unchecked - increment atomic64 variable
6203 + * @v: pointer to type atomic64_unchecked_t
6204 + *
6205 + * Atomically increments @v by 1.
6206 + */
6207 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6208 +{
6209 asm volatile(LOCK_PREFIX "incq %0"
6210 : "=m" (v->counter)
6211 : "m" (v->counter));
6212 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6213 */
6214 static inline void atomic64_dec(atomic64_t *v)
6215 {
6216 - asm volatile(LOCK_PREFIX "decq %0"
6217 + asm volatile(LOCK_PREFIX "decq %0\n"
6218 +
6219 +#ifdef CONFIG_PAX_REFCOUNT
6220 + "jno 0f\n"
6221 + LOCK_PREFIX "incq %0\n"
6222 + "int $4\n0:\n"
6223 + _ASM_EXTABLE(0b, 0b)
6224 +#endif
6225 +
6226 + : "=m" (v->counter)
6227 + : "m" (v->counter));
6228 +}
6229 +
6230 +/**
6231 + * atomic64_dec_unchecked - decrement atomic64 variable
6232 + * @v: pointer to type atomic64_t
6233 + *
6234 + * Atomically decrements @v by 1.
6235 + */
6236 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6237 +{
6238 + asm volatile(LOCK_PREFIX "decq %0\n"
6239 : "=m" (v->counter)
6240 : "m" (v->counter));
6241 }
6242 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6243 {
6244 unsigned char c;
6245
6246 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6247 + asm volatile(LOCK_PREFIX "decq %0\n"
6248 +
6249 +#ifdef CONFIG_PAX_REFCOUNT
6250 + "jno 0f\n"
6251 + LOCK_PREFIX "incq %0\n"
6252 + "int $4\n0:\n"
6253 + _ASM_EXTABLE(0b, 0b)
6254 +#endif
6255 +
6256 + "sete %1\n"
6257 : "=m" (v->counter), "=qm" (c)
6258 : "m" (v->counter) : "memory");
6259 return c != 0;
6260 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6261 {
6262 unsigned char c;
6263
6264 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6265 + asm volatile(LOCK_PREFIX "incq %0\n"
6266 +
6267 +#ifdef CONFIG_PAX_REFCOUNT
6268 + "jno 0f\n"
6269 + LOCK_PREFIX "decq %0\n"
6270 + "int $4\n0:\n"
6271 + _ASM_EXTABLE(0b, 0b)
6272 +#endif
6273 +
6274 + "sete %1\n"
6275 : "=m" (v->counter), "=qm" (c)
6276 : "m" (v->counter) : "memory");
6277 return c != 0;
6278 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6279 {
6280 unsigned char c;
6281
6282 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6283 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6284 +
6285 +#ifdef CONFIG_PAX_REFCOUNT
6286 + "jno 0f\n"
6287 + LOCK_PREFIX "subq %2,%0\n"
6288 + "int $4\n0:\n"
6289 + _ASM_EXTABLE(0b, 0b)
6290 +#endif
6291 +
6292 + "sets %1\n"
6293 : "=m" (v->counter), "=qm" (c)
6294 : "er" (i), "m" (v->counter) : "memory");
6295 return c;
6296 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6297 static inline long atomic64_add_return(long i, atomic64_t *v)
6298 {
6299 long __i = i;
6300 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6301 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6302 +
6303 +#ifdef CONFIG_PAX_REFCOUNT
6304 + "jno 0f\n"
6305 + "movq %0, %1\n"
6306 + "int $4\n0:\n"
6307 + _ASM_EXTABLE(0b, 0b)
6308 +#endif
6309 +
6310 + : "+r" (i), "+m" (v->counter)
6311 + : : "memory");
6312 + return i + __i;
6313 +}
6314 +
6315 +/**
6316 + * atomic64_add_return_unchecked - add and return
6317 + * @i: integer value to add
6318 + * @v: pointer to type atomic64_unchecked_t
6319 + *
6320 + * Atomically adds @i to @v and returns @i + @v
6321 + */
6322 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6323 +{
6324 + long __i = i;
6325 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6326 : "+r" (i), "+m" (v->counter)
6327 : : "memory");
6328 return i + __i;
6329 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6330 }
6331
6332 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6333 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6334 +{
6335 + return atomic64_add_return_unchecked(1, v);
6336 +}
6337 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6338
6339 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6340 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6341 return cmpxchg(&v->counter, old, new);
6342 }
6343
6344 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6345 +{
6346 + return cmpxchg(&v->counter, old, new);
6347 +}
6348 +
6349 static inline long atomic64_xchg(atomic64_t *v, long new)
6350 {
6351 return xchg(&v->counter, new);
6352 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6353 */
6354 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6355 {
6356 - long c, old;
6357 + long c, old, new;
6358 c = atomic64_read(v);
6359 for (;;) {
6360 - if (unlikely(c == (u)))
6361 + if (unlikely(c == u))
6362 break;
6363 - old = atomic64_cmpxchg((v), c, c + (a));
6364 +
6365 + asm volatile("add %2,%0\n"
6366 +
6367 +#ifdef CONFIG_PAX_REFCOUNT
6368 + "jno 0f\n"
6369 + "sub %2,%0\n"
6370 + "int $4\n0:\n"
6371 + _ASM_EXTABLE(0b, 0b)
6372 +#endif
6373 +
6374 + : "=r" (new)
6375 + : "0" (c), "ir" (a));
6376 +
6377 + old = atomic64_cmpxchg(v, c, new);
6378 if (likely(old == c))
6379 break;
6380 c = old;
6381 }
6382 - return c != (u);
6383 + return c != u;
6384 }
6385
6386 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6387 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic.h linux-2.6.39.4/arch/x86/include/asm/atomic.h
6388 --- linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400
6389 +++ linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-08-05 19:44:33.000000000 -0400
6390 @@ -22,7 +22,18 @@
6391 */
6392 static inline int atomic_read(const atomic_t *v)
6393 {
6394 - return (*(volatile int *)&(v)->counter);
6395 + return (*(volatile const int *)&(v)->counter);
6396 +}
6397 +
6398 +/**
6399 + * atomic_read_unchecked - read atomic variable
6400 + * @v: pointer of type atomic_unchecked_t
6401 + *
6402 + * Atomically reads the value of @v.
6403 + */
6404 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6405 +{
6406 + return (*(volatile const int *)&(v)->counter);
6407 }
6408
6409 /**
6410 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6411 }
6412
6413 /**
6414 + * atomic_set_unchecked - set atomic variable
6415 + * @v: pointer of type atomic_unchecked_t
6416 + * @i: required value
6417 + *
6418 + * Atomically sets the value of @v to @i.
6419 + */
6420 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6421 +{
6422 + v->counter = i;
6423 +}
6424 +
6425 +/**
6426 * atomic_add - add integer to atomic variable
6427 * @i: integer value to add
6428 * @v: pointer of type atomic_t
6429 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6430 */
6431 static inline void atomic_add(int i, atomic_t *v)
6432 {
6433 - asm volatile(LOCK_PREFIX "addl %1,%0"
6434 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6435 +
6436 +#ifdef CONFIG_PAX_REFCOUNT
6437 + "jno 0f\n"
6438 + LOCK_PREFIX "subl %1,%0\n"
6439 + "int $4\n0:\n"
6440 + _ASM_EXTABLE(0b, 0b)
6441 +#endif
6442 +
6443 + : "+m" (v->counter)
6444 + : "ir" (i));
6445 +}
6446 +
6447 +/**
6448 + * atomic_add_unchecked - add integer to atomic variable
6449 + * @i: integer value to add
6450 + * @v: pointer of type atomic_unchecked_t
6451 + *
6452 + * Atomically adds @i to @v.
6453 + */
6454 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6455 +{
6456 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6457 : "+m" (v->counter)
6458 : "ir" (i));
6459 }
6460 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6461 */
6462 static inline void atomic_sub(int i, atomic_t *v)
6463 {
6464 - asm volatile(LOCK_PREFIX "subl %1,%0"
6465 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6466 +
6467 +#ifdef CONFIG_PAX_REFCOUNT
6468 + "jno 0f\n"
6469 + LOCK_PREFIX "addl %1,%0\n"
6470 + "int $4\n0:\n"
6471 + _ASM_EXTABLE(0b, 0b)
6472 +#endif
6473 +
6474 + : "+m" (v->counter)
6475 + : "ir" (i));
6476 +}
6477 +
6478 +/**
6479 + * atomic_sub_unchecked - subtract integer from atomic variable
6480 + * @i: integer value to subtract
6481 + * @v: pointer of type atomic_unchecked_t
6482 + *
6483 + * Atomically subtracts @i from @v.
6484 + */
6485 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6486 +{
6487 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6488 : "+m" (v->counter)
6489 : "ir" (i));
6490 }
6491 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6492 {
6493 unsigned char c;
6494
6495 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6496 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6497 +
6498 +#ifdef CONFIG_PAX_REFCOUNT
6499 + "jno 0f\n"
6500 + LOCK_PREFIX "addl %2,%0\n"
6501 + "int $4\n0:\n"
6502 + _ASM_EXTABLE(0b, 0b)
6503 +#endif
6504 +
6505 + "sete %1\n"
6506 : "+m" (v->counter), "=qm" (c)
6507 : "ir" (i) : "memory");
6508 return c;
6509 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6510 */
6511 static inline void atomic_inc(atomic_t *v)
6512 {
6513 - asm volatile(LOCK_PREFIX "incl %0"
6514 + asm volatile(LOCK_PREFIX "incl %0\n"
6515 +
6516 +#ifdef CONFIG_PAX_REFCOUNT
6517 + "jno 0f\n"
6518 + LOCK_PREFIX "decl %0\n"
6519 + "int $4\n0:\n"
6520 + _ASM_EXTABLE(0b, 0b)
6521 +#endif
6522 +
6523 + : "+m" (v->counter));
6524 +}
6525 +
6526 +/**
6527 + * atomic_inc_unchecked - increment atomic variable
6528 + * @v: pointer of type atomic_unchecked_t
6529 + *
6530 + * Atomically increments @v by 1.
6531 + */
6532 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6533 +{
6534 + asm volatile(LOCK_PREFIX "incl %0\n"
6535 : "+m" (v->counter));
6536 }
6537
6538 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6539 */
6540 static inline void atomic_dec(atomic_t *v)
6541 {
6542 - asm volatile(LOCK_PREFIX "decl %0"
6543 + asm volatile(LOCK_PREFIX "decl %0\n"
6544 +
6545 +#ifdef CONFIG_PAX_REFCOUNT
6546 + "jno 0f\n"
6547 + LOCK_PREFIX "incl %0\n"
6548 + "int $4\n0:\n"
6549 + _ASM_EXTABLE(0b, 0b)
6550 +#endif
6551 +
6552 + : "+m" (v->counter));
6553 +}
6554 +
6555 +/**
6556 + * atomic_dec_unchecked - decrement atomic variable
6557 + * @v: pointer of type atomic_unchecked_t
6558 + *
6559 + * Atomically decrements @v by 1.
6560 + */
6561 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6562 +{
6563 + asm volatile(LOCK_PREFIX "decl %0\n"
6564 : "+m" (v->counter));
6565 }
6566
6567 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6568 {
6569 unsigned char c;
6570
6571 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6572 + asm volatile(LOCK_PREFIX "decl %0\n"
6573 +
6574 +#ifdef CONFIG_PAX_REFCOUNT
6575 + "jno 0f\n"
6576 + LOCK_PREFIX "incl %0\n"
6577 + "int $4\n0:\n"
6578 + _ASM_EXTABLE(0b, 0b)
6579 +#endif
6580 +
6581 + "sete %1\n"
6582 : "+m" (v->counter), "=qm" (c)
6583 : : "memory");
6584 return c != 0;
6585 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6586 {
6587 unsigned char c;
6588
6589 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6590 + asm volatile(LOCK_PREFIX "incl %0\n"
6591 +
6592 +#ifdef CONFIG_PAX_REFCOUNT
6593 + "jno 0f\n"
6594 + LOCK_PREFIX "decl %0\n"
6595 + "int $4\n0:\n"
6596 + _ASM_EXTABLE(0b, 0b)
6597 +#endif
6598 +
6599 + "sete %1\n"
6600 + : "+m" (v->counter), "=qm" (c)
6601 + : : "memory");
6602 + return c != 0;
6603 +}
6604 +
6605 +/**
6606 + * atomic_inc_and_test_unchecked - increment and test
6607 + * @v: pointer of type atomic_unchecked_t
6608 + *
6609 + * Atomically increments @v by 1
6610 + * and returns true if the result is zero, or false for all
6611 + * other cases.
6612 + */
6613 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6614 +{
6615 + unsigned char c;
6616 +
6617 + asm volatile(LOCK_PREFIX "incl %0\n"
6618 + "sete %1\n"
6619 : "+m" (v->counter), "=qm" (c)
6620 : : "memory");
6621 return c != 0;
6622 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6623 {
6624 unsigned char c;
6625
6626 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6627 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6628 +
6629 +#ifdef CONFIG_PAX_REFCOUNT
6630 + "jno 0f\n"
6631 + LOCK_PREFIX "subl %2,%0\n"
6632 + "int $4\n0:\n"
6633 + _ASM_EXTABLE(0b, 0b)
6634 +#endif
6635 +
6636 + "sets %1\n"
6637 : "+m" (v->counter), "=qm" (c)
6638 : "ir" (i) : "memory");
6639 return c;
6640 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6641 #endif
6642 /* Modern 486+ processor */
6643 __i = i;
6644 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6645 +
6646 +#ifdef CONFIG_PAX_REFCOUNT
6647 + "jno 0f\n"
6648 + "movl %0, %1\n"
6649 + "int $4\n0:\n"
6650 + _ASM_EXTABLE(0b, 0b)
6651 +#endif
6652 +
6653 + : "+r" (i), "+m" (v->counter)
6654 + : : "memory");
6655 + return i + __i;
6656 +
6657 +#ifdef CONFIG_M386
6658 +no_xadd: /* Legacy 386 processor */
6659 + local_irq_save(flags);
6660 + __i = atomic_read(v);
6661 + atomic_set(v, i + __i);
6662 + local_irq_restore(flags);
6663 + return i + __i;
6664 +#endif
6665 +}
6666 +
6667 +/**
6668 + * atomic_add_return_unchecked - add integer and return
6669 + * @v: pointer of type atomic_unchecked_t
6670 + * @i: integer value to add
6671 + *
6672 + * Atomically adds @i to @v and returns @i + @v
6673 + */
6674 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6675 +{
6676 + int __i;
6677 +#ifdef CONFIG_M386
6678 + unsigned long flags;
6679 + if (unlikely(boot_cpu_data.x86 <= 3))
6680 + goto no_xadd;
6681 +#endif
6682 + /* Modern 486+ processor */
6683 + __i = i;
6684 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6685 : "+r" (i), "+m" (v->counter)
6686 : : "memory");
6687 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6688 }
6689
6690 #define atomic_inc_return(v) (atomic_add_return(1, v))
6691 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6692 +{
6693 + return atomic_add_return_unchecked(1, v);
6694 +}
6695 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6696
6697 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6698 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6699 return cmpxchg(&v->counter, old, new);
6700 }
6701
6702 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6703 +{
6704 + return cmpxchg(&v->counter, old, new);
6705 +}
6706 +
6707 static inline int atomic_xchg(atomic_t *v, int new)
6708 {
6709 return xchg(&v->counter, new);
6710 }
6711
6712 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6713 +{
6714 + return xchg(&v->counter, new);
6715 +}
6716 +
6717 /**
6718 * atomic_add_unless - add unless the number is already a given value
6719 * @v: pointer of type atomic_t
6720 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6721 */
6722 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6723 {
6724 - int c, old;
6725 + int c, old, new;
6726 c = atomic_read(v);
6727 for (;;) {
6728 - if (unlikely(c == (u)))
6729 + if (unlikely(c == u))
6730 break;
6731 - old = atomic_cmpxchg((v), c, c + (a));
6732 +
6733 + asm volatile("addl %2,%0\n"
6734 +
6735 +#ifdef CONFIG_PAX_REFCOUNT
6736 + "jno 0f\n"
6737 + "subl %2,%0\n"
6738 + "int $4\n0:\n"
6739 + _ASM_EXTABLE(0b, 0b)
6740 +#endif
6741 +
6742 + : "=r" (new)
6743 + : "0" (c), "ir" (a));
6744 +
6745 + old = atomic_cmpxchg(v, c, new);
6746 if (likely(old == c))
6747 break;
6748 c = old;
6749 }
6750 - return c != (u);
6751 + return c != u;
6752 }
6753
6754 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6755
6756 +/**
6757 + * atomic_inc_not_zero_hint - increment if not null
6758 + * @v: pointer of type atomic_t
6759 + * @hint: probable value of the atomic before the increment
6760 + *
6761 + * This version of atomic_inc_not_zero() gives a hint of probable
6762 + * value of the atomic. This helps processor to not read the memory
6763 + * before doing the atomic read/modify/write cycle, lowering
6764 + * number of bus transactions on some arches.
6765 + *
6766 + * Returns: 0 if increment was not done, 1 otherwise.
6767 + */
6768 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6769 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6770 +{
6771 + int val, c = hint, new;
6772 +
6773 + /* sanity test, should be removed by compiler if hint is a constant */
6774 + if (!hint)
6775 + return atomic_inc_not_zero(v);
6776 +
6777 + do {
6778 + asm volatile("incl %0\n"
6779 +
6780 +#ifdef CONFIG_PAX_REFCOUNT
6781 + "jno 0f\n"
6782 + "decl %0\n"
6783 + "int $4\n0:\n"
6784 + _ASM_EXTABLE(0b, 0b)
6785 +#endif
6786 +
6787 + : "=r" (new)
6788 + : "0" (c));
6789 +
6790 + val = atomic_cmpxchg(v, c, new);
6791 + if (val == c)
6792 + return 1;
6793 + c = val;
6794 + } while (c);
6795 +
6796 + return 0;
6797 +}
6798 +
6799 /*
6800 * atomic_dec_if_positive - decrement by 1 if old value positive
6801 * @v: pointer of type atomic_t
6802 diff -urNp linux-2.6.39.4/arch/x86/include/asm/bitops.h linux-2.6.39.4/arch/x86/include/asm/bitops.h
6803 --- linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400
6804 +++ linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-08-05 19:44:33.000000000 -0400
6805 @@ -38,7 +38,7 @@
6806 * a mask operation on a byte.
6807 */
6808 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6809 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6810 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6811 #define CONST_MASK(nr) (1 << ((nr) & 7))
6812
6813 /**
6814 diff -urNp linux-2.6.39.4/arch/x86/include/asm/boot.h linux-2.6.39.4/arch/x86/include/asm/boot.h
6815 --- linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400
6816 +++ linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-08-05 19:44:33.000000000 -0400
6817 @@ -11,10 +11,15 @@
6818 #include <asm/pgtable_types.h>
6819
6820 /* Physical address where kernel should be loaded. */
6821 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6822 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6823 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6824 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6825
6826 +#ifndef __ASSEMBLY__
6827 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6828 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6829 +#endif
6830 +
6831 /* Minimum kernel alignment, as a power of two */
6832 #ifdef CONFIG_X86_64
6833 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6834 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cacheflush.h linux-2.6.39.4/arch/x86/include/asm/cacheflush.h
6835 --- linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
6836 +++ linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-08-05 19:44:33.000000000 -0400
6837 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6838 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6839
6840 if (pg_flags == _PGMT_DEFAULT)
6841 - return -1;
6842 + return ~0UL;
6843 else if (pg_flags == _PGMT_WC)
6844 return _PAGE_CACHE_WC;
6845 else if (pg_flags == _PGMT_UC_MINUS)
6846 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cache.h linux-2.6.39.4/arch/x86/include/asm/cache.h
6847 --- linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
6848 +++ linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
6849 @@ -5,12 +5,13 @@
6850
6851 /* L1 cache line size */
6852 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6853 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6854 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6855
6856 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6857 +#define __read_only __attribute__((__section__(".data..read_only")))
6858
6859 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6860 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6861 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6862
6863 #ifdef CONFIG_X86_VSMP
6864 #ifdef CONFIG_SMP
6865 diff -urNp linux-2.6.39.4/arch/x86/include/asm/checksum_32.h linux-2.6.39.4/arch/x86/include/asm/checksum_32.h
6866 --- linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400
6867 +++ linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-08-05 19:44:33.000000000 -0400
6868 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6869 int len, __wsum sum,
6870 int *src_err_ptr, int *dst_err_ptr);
6871
6872 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6873 + int len, __wsum sum,
6874 + int *src_err_ptr, int *dst_err_ptr);
6875 +
6876 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6877 + int len, __wsum sum,
6878 + int *src_err_ptr, int *dst_err_ptr);
6879 +
6880 /*
6881 * Note: when you get a NULL pointer exception here this means someone
6882 * passed in an incorrect kernel address to one of these functions.
6883 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6884 int *err_ptr)
6885 {
6886 might_sleep();
6887 - return csum_partial_copy_generic((__force void *)src, dst,
6888 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6889 len, sum, err_ptr, NULL);
6890 }
6891
6892 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6893 {
6894 might_sleep();
6895 if (access_ok(VERIFY_WRITE, dst, len))
6896 - return csum_partial_copy_generic(src, (__force void *)dst,
6897 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6898 len, sum, NULL, err_ptr);
6899
6900 if (len)
6901 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cpufeature.h linux-2.6.39.4/arch/x86/include/asm/cpufeature.h
6902 --- linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400
6903 +++ linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-08-05 19:44:33.000000000 -0400
6904 @@ -351,7 +351,7 @@ static __always_inline __pure bool __sta
6905 ".section .discard,\"aw\",@progbits\n"
6906 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6907 ".previous\n"
6908 - ".section .altinstr_replacement,\"ax\"\n"
6909 + ".section .altinstr_replacement,\"a\"\n"
6910 "3: movb $1,%0\n"
6911 "4:\n"
6912 ".previous\n"
6913 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc_defs.h linux-2.6.39.4/arch/x86/include/asm/desc_defs.h
6914 --- linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400
6915 +++ linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-08-05 19:44:33.000000000 -0400
6916 @@ -31,6 +31,12 @@ struct desc_struct {
6917 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6918 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6919 };
6920 + struct {
6921 + u16 offset_low;
6922 + u16 seg;
6923 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6924 + unsigned offset_high: 16;
6925 + } gate;
6926 };
6927 } __attribute__((packed));
6928
6929 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc.h linux-2.6.39.4/arch/x86/include/asm/desc.h
6930 --- linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400
6931 +++ linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-08-05 19:44:33.000000000 -0400
6932 @@ -4,6 +4,7 @@
6933 #include <asm/desc_defs.h>
6934 #include <asm/ldt.h>
6935 #include <asm/mmu.h>
6936 +#include <asm/pgtable.h>
6937 #include <linux/smp.h>
6938
6939 static inline void fill_ldt(struct desc_struct *desc,
6940 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
6941 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
6942 desc->type = (info->read_exec_only ^ 1) << 1;
6943 desc->type |= info->contents << 2;
6944 + desc->type |= info->seg_not_present ^ 1;
6945 desc->s = 1;
6946 desc->dpl = 0x3;
6947 desc->p = info->seg_not_present ^ 1;
6948 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
6949 }
6950
6951 extern struct desc_ptr idt_descr;
6952 -extern gate_desc idt_table[];
6953 -
6954 -struct gdt_page {
6955 - struct desc_struct gdt[GDT_ENTRIES];
6956 -} __attribute__((aligned(PAGE_SIZE)));
6957 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6958 +extern gate_desc idt_table[256];
6959
6960 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6961 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6962 {
6963 - return per_cpu(gdt_page, cpu).gdt;
6964 + return cpu_gdt_table[cpu];
6965 }
6966
6967 #ifdef CONFIG_X86_64
6968 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
6969 unsigned long base, unsigned dpl, unsigned flags,
6970 unsigned short seg)
6971 {
6972 - gate->a = (seg << 16) | (base & 0xffff);
6973 - gate->b = (base & 0xffff0000) |
6974 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6975 + gate->gate.offset_low = base;
6976 + gate->gate.seg = seg;
6977 + gate->gate.reserved = 0;
6978 + gate->gate.type = type;
6979 + gate->gate.s = 0;
6980 + gate->gate.dpl = dpl;
6981 + gate->gate.p = 1;
6982 + gate->gate.offset_high = base >> 16;
6983 }
6984
6985 #endif
6986 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
6987 static inline void native_write_idt_entry(gate_desc *idt, int entry,
6988 const gate_desc *gate)
6989 {
6990 + pax_open_kernel();
6991 memcpy(&idt[entry], gate, sizeof(*gate));
6992 + pax_close_kernel();
6993 }
6994
6995 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
6996 const void *desc)
6997 {
6998 + pax_open_kernel();
6999 memcpy(&ldt[entry], desc, 8);
7000 + pax_close_kernel();
7001 }
7002
7003 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
7004 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
7005 size = sizeof(struct desc_struct);
7006 break;
7007 }
7008 +
7009 + pax_open_kernel();
7010 memcpy(&gdt[entry], desc, size);
7011 + pax_close_kernel();
7012 }
7013
7014 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7015 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7016
7017 static inline void native_load_tr_desc(void)
7018 {
7019 + pax_open_kernel();
7020 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7021 + pax_close_kernel();
7022 }
7023
7024 static inline void native_load_gdt(const struct desc_ptr *dtr)
7025 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7026 unsigned int i;
7027 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7028
7029 + pax_open_kernel();
7030 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7031 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7032 + pax_close_kernel();
7033 }
7034
7035 #define _LDT_empty(info) \
7036 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7037 desc->limit = (limit >> 16) & 0xf;
7038 }
7039
7040 -static inline void _set_gate(int gate, unsigned type, void *addr,
7041 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7042 unsigned dpl, unsigned ist, unsigned seg)
7043 {
7044 gate_desc s;
7045 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7046 * Pentium F0 0F bugfix can have resulted in the mapped
7047 * IDT being write-protected.
7048 */
7049 -static inline void set_intr_gate(unsigned int n, void *addr)
7050 +static inline void set_intr_gate(unsigned int n, const void *addr)
7051 {
7052 BUG_ON((unsigned)n > 0xFF);
7053 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7054 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7055 /*
7056 * This routine sets up an interrupt gate at directory privilege level 3.
7057 */
7058 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7059 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7060 {
7061 BUG_ON((unsigned)n > 0xFF);
7062 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7063 }
7064
7065 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7066 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7067 {
7068 BUG_ON((unsigned)n > 0xFF);
7069 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7070 }
7071
7072 -static inline void set_trap_gate(unsigned int n, void *addr)
7073 +static inline void set_trap_gate(unsigned int n, const void *addr)
7074 {
7075 BUG_ON((unsigned)n > 0xFF);
7076 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7077 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
7078 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7079 {
7080 BUG_ON((unsigned)n > 0xFF);
7081 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7082 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7083 }
7084
7085 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7086 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7087 {
7088 BUG_ON((unsigned)n > 0xFF);
7089 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7090 }
7091
7092 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7093 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7094 {
7095 BUG_ON((unsigned)n > 0xFF);
7096 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7097 }
7098
7099 +#ifdef CONFIG_X86_32
7100 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7101 +{
7102 + struct desc_struct d;
7103 +
7104 + if (likely(limit))
7105 + limit = (limit - 1UL) >> PAGE_SHIFT;
7106 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7107 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7108 +}
7109 +#endif
7110 +
7111 #endif /* _ASM_X86_DESC_H */
7112 diff -urNp linux-2.6.39.4/arch/x86/include/asm/e820.h linux-2.6.39.4/arch/x86/include/asm/e820.h
7113 --- linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400
7114 +++ linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-08-05 19:44:33.000000000 -0400
7115 @@ -69,7 +69,7 @@ struct e820map {
7116 #define ISA_START_ADDRESS 0xa0000
7117 #define ISA_END_ADDRESS 0x100000
7118
7119 -#define BIOS_BEGIN 0x000a0000
7120 +#define BIOS_BEGIN 0x000c0000
7121 #define BIOS_END 0x00100000
7122
7123 #define BIOS_ROM_BASE 0xffe00000
7124 diff -urNp linux-2.6.39.4/arch/x86/include/asm/elf.h linux-2.6.39.4/arch/x86/include/asm/elf.h
7125 --- linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
7126 +++ linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
7127 @@ -237,7 +237,25 @@ extern int force_personality32;
7128 the loader. We need to make sure that it is out of the way of the program
7129 that it will "exec", and that there is sufficient room for the brk. */
7130
7131 +#ifdef CONFIG_PAX_SEGMEXEC
7132 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7133 +#else
7134 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7135 +#endif
7136 +
7137 +#ifdef CONFIG_PAX_ASLR
7138 +#ifdef CONFIG_X86_32
7139 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7140 +
7141 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7142 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7143 +#else
7144 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7145 +
7146 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7147 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7148 +#endif
7149 +#endif
7150
7151 /* This yields a mask that user programs can use to figure out what
7152 instruction set this CPU supports. This could be done in user space,
7153 @@ -291,8 +309,7 @@ do { \
7154 #define ARCH_DLINFO \
7155 do { \
7156 if (vdso_enabled) \
7157 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7158 - (unsigned long)current->mm->context.vdso); \
7159 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7160 } while (0)
7161
7162 #define AT_SYSINFO 32
7163 @@ -303,7 +320,7 @@ do { \
7164
7165 #endif /* !CONFIG_X86_32 */
7166
7167 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7168 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7169
7170 #define VDSO_ENTRY \
7171 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7172 @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7173 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7174 #define compat_arch_setup_additional_pages syscall32_setup_pages
7175
7176 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7177 -#define arch_randomize_brk arch_randomize_brk
7178 -
7179 #endif /* _ASM_X86_ELF_H */
7180 diff -urNp linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h
7181 --- linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400
7182 +++ linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-08-05 19:44:33.000000000 -0400
7183 @@ -15,6 +15,6 @@ enum reboot_type {
7184
7185 extern enum reboot_type reboot_type;
7186
7187 -extern void machine_emergency_restart(void);
7188 +extern void machine_emergency_restart(void) __noreturn;
7189
7190 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7191 diff -urNp linux-2.6.39.4/arch/x86/include/asm/futex.h linux-2.6.39.4/arch/x86/include/asm/futex.h
7192 --- linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400
7193 +++ linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-08-05 19:44:33.000000000 -0400
7194 @@ -12,16 +12,18 @@
7195 #include <asm/system.h>
7196
7197 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7198 + typecheck(u32 *, uaddr); \
7199 asm volatile("1:\t" insn "\n" \
7200 "2:\t.section .fixup,\"ax\"\n" \
7201 "3:\tmov\t%3, %1\n" \
7202 "\tjmp\t2b\n" \
7203 "\t.previous\n" \
7204 _ASM_EXTABLE(1b, 3b) \
7205 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7206 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7207 : "i" (-EFAULT), "0" (oparg), "1" (0))
7208
7209 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7210 + typecheck(u32 *, uaddr); \
7211 asm volatile("1:\tmovl %2, %0\n" \
7212 "\tmovl\t%0, %3\n" \
7213 "\t" insn "\n" \
7214 @@ -34,7 +36,7 @@
7215 _ASM_EXTABLE(1b, 4b) \
7216 _ASM_EXTABLE(2b, 4b) \
7217 : "=&a" (oldval), "=&r" (ret), \
7218 - "+m" (*uaddr), "=&r" (tem) \
7219 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7220 : "r" (oparg), "i" (-EFAULT), "1" (0))
7221
7222 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7223 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7224
7225 switch (op) {
7226 case FUTEX_OP_SET:
7227 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7228 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7229 break;
7230 case FUTEX_OP_ADD:
7231 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7232 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7233 uaddr, oparg);
7234 break;
7235 case FUTEX_OP_OR:
7236 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7237 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7238 return -EFAULT;
7239
7240 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7241 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7242 "2:\t.section .fixup, \"ax\"\n"
7243 "3:\tmov %3, %0\n"
7244 "\tjmp 2b\n"
7245 "\t.previous\n"
7246 _ASM_EXTABLE(1b, 3b)
7247 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7248 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7249 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7250 : "memory"
7251 );
7252 diff -urNp linux-2.6.39.4/arch/x86/include/asm/hw_irq.h linux-2.6.39.4/arch/x86/include/asm/hw_irq.h
7253 --- linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400
7254 +++ linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-08-05 19:44:33.000000000 -0400
7255 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7256 extern void enable_IO_APIC(void);
7257
7258 /* Statistics */
7259 -extern atomic_t irq_err_count;
7260 -extern atomic_t irq_mis_count;
7261 +extern atomic_unchecked_t irq_err_count;
7262 +extern atomic_unchecked_t irq_mis_count;
7263
7264 /* EISA */
7265 extern void eisa_set_level_irq(unsigned int irq);
7266 diff -urNp linux-2.6.39.4/arch/x86/include/asm/i387.h linux-2.6.39.4/arch/x86/include/asm/i387.h
7267 --- linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400
7268 +++ linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-08-05 19:44:33.000000000 -0400
7269 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7270 {
7271 int err;
7272
7273 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7274 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7275 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7276 +#endif
7277 +
7278 /* See comment in fxsave() below. */
7279 #ifdef CONFIG_AS_FXSAVEQ
7280 asm volatile("1: fxrstorq %[fx]\n\t"
7281 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7282 {
7283 int err;
7284
7285 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7286 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7287 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7288 +#endif
7289 +
7290 /*
7291 * Clear the bytes not touched by the fxsave and reserved
7292 * for the SW usage.
7293 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7294 #endif /* CONFIG_X86_64 */
7295
7296 /* We need a safe address that is cheap to find and that is already
7297 - in L1 during context switch. The best choices are unfortunately
7298 - different for UP and SMP */
7299 -#ifdef CONFIG_SMP
7300 -#define safe_address (__per_cpu_offset[0])
7301 -#else
7302 -#define safe_address (kstat_cpu(0).cpustat.user)
7303 -#endif
7304 + in L1 during context switch. */
7305 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7306
7307 /*
7308 * These must be called with preempt disabled
7309 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7310 struct thread_info *me = current_thread_info();
7311 preempt_disable();
7312 if (me->status & TS_USEDFPU)
7313 - __save_init_fpu(me->task);
7314 + __save_init_fpu(current);
7315 else
7316 clts();
7317 }
7318 diff -urNp linux-2.6.39.4/arch/x86/include/asm/io.h linux-2.6.39.4/arch/x86/include/asm/io.h
7319 --- linux-2.6.39.4/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400
7320 +++ linux-2.6.39.4/arch/x86/include/asm/io.h 2011-08-05 19:44:33.000000000 -0400
7321 @@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
7322
7323 #include <linux/vmalloc.h>
7324
7325 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7326 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7327 +{
7328 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7329 +}
7330 +
7331 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7332 +{
7333 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7334 +}
7335 +
7336 /*
7337 * Convert a virtual cached pointer to an uncached pointer
7338 */
7339 diff -urNp linux-2.6.39.4/arch/x86/include/asm/irqflags.h linux-2.6.39.4/arch/x86/include/asm/irqflags.h
7340 --- linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400
7341 +++ linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-08-05 19:44:33.000000000 -0400
7342 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7343 sti; \
7344 sysexit
7345
7346 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7347 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7348 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7349 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7350 +
7351 #else
7352 #define INTERRUPT_RETURN iret
7353 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7354 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kprobes.h linux-2.6.39.4/arch/x86/include/asm/kprobes.h
7355 --- linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400
7356 +++ linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-08-05 19:44:33.000000000 -0400
7357 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7358 #define RELATIVEJUMP_SIZE 5
7359 #define RELATIVECALL_OPCODE 0xe8
7360 #define RELATIVE_ADDR_SIZE 4
7361 -#define MAX_STACK_SIZE 64
7362 -#define MIN_STACK_SIZE(ADDR) \
7363 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7364 - THREAD_SIZE - (unsigned long)(ADDR))) \
7365 - ? (MAX_STACK_SIZE) \
7366 - : (((unsigned long)current_thread_info()) + \
7367 - THREAD_SIZE - (unsigned long)(ADDR)))
7368 +#define MAX_STACK_SIZE 64UL
7369 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7370
7371 #define flush_insn_slot(p) do { } while (0)
7372
7373 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kvm_host.h linux-2.6.39.4/arch/x86/include/asm/kvm_host.h
7374 --- linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
7375 +++ linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-08-05 20:34:06.000000000 -0400
7376 @@ -419,7 +419,7 @@ struct kvm_arch {
7377 unsigned int n_used_mmu_pages;
7378 unsigned int n_requested_mmu_pages;
7379 unsigned int n_max_mmu_pages;
7380 - atomic_t invlpg_counter;
7381 + atomic_unchecked_t invlpg_counter;
7382 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7383 /*
7384 * Hash table of struct kvm_mmu_page.
7385 @@ -589,7 +589,7 @@ struct kvm_x86_ops {
7386 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
7387
7388 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
7389 - const struct trace_print_flags *exit_reasons_str;
7390 + const struct trace_print_flags * const exit_reasons_str;
7391 };
7392
7393 struct kvm_arch_async_pf {
7394 diff -urNp linux-2.6.39.4/arch/x86/include/asm/local.h linux-2.6.39.4/arch/x86/include/asm/local.h
7395 --- linux-2.6.39.4/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400
7396 +++ linux-2.6.39.4/arch/x86/include/asm/local.h 2011-08-05 19:44:33.000000000 -0400
7397 @@ -18,26 +18,58 @@ typedef struct {
7398
7399 static inline void local_inc(local_t *l)
7400 {
7401 - asm volatile(_ASM_INC "%0"
7402 + asm volatile(_ASM_INC "%0\n"
7403 +
7404 +#ifdef CONFIG_PAX_REFCOUNT
7405 + "jno 0f\n"
7406 + _ASM_DEC "%0\n"
7407 + "int $4\n0:\n"
7408 + _ASM_EXTABLE(0b, 0b)
7409 +#endif
7410 +
7411 : "+m" (l->a.counter));
7412 }
7413
7414 static inline void local_dec(local_t *l)
7415 {
7416 - asm volatile(_ASM_DEC "%0"
7417 + asm volatile(_ASM_DEC "%0\n"
7418 +
7419 +#ifdef CONFIG_PAX_REFCOUNT
7420 + "jno 0f\n"
7421 + _ASM_INC "%0\n"
7422 + "int $4\n0:\n"
7423 + _ASM_EXTABLE(0b, 0b)
7424 +#endif
7425 +
7426 : "+m" (l->a.counter));
7427 }
7428
7429 static inline void local_add(long i, local_t *l)
7430 {
7431 - asm volatile(_ASM_ADD "%1,%0"
7432 + asm volatile(_ASM_ADD "%1,%0\n"
7433 +
7434 +#ifdef CONFIG_PAX_REFCOUNT
7435 + "jno 0f\n"
7436 + _ASM_SUB "%1,%0\n"
7437 + "int $4\n0:\n"
7438 + _ASM_EXTABLE(0b, 0b)
7439 +#endif
7440 +
7441 : "+m" (l->a.counter)
7442 : "ir" (i));
7443 }
7444
7445 static inline void local_sub(long i, local_t *l)
7446 {
7447 - asm volatile(_ASM_SUB "%1,%0"
7448 + asm volatile(_ASM_SUB "%1,%0\n"
7449 +
7450 +#ifdef CONFIG_PAX_REFCOUNT
7451 + "jno 0f\n"
7452 + _ASM_ADD "%1,%0\n"
7453 + "int $4\n0:\n"
7454 + _ASM_EXTABLE(0b, 0b)
7455 +#endif
7456 +
7457 : "+m" (l->a.counter)
7458 : "ir" (i));
7459 }
7460 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7461 {
7462 unsigned char c;
7463
7464 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7465 + asm volatile(_ASM_SUB "%2,%0\n"
7466 +
7467 +#ifdef CONFIG_PAX_REFCOUNT
7468 + "jno 0f\n"
7469 + _ASM_ADD "%2,%0\n"
7470 + "int $4\n0:\n"
7471 + _ASM_EXTABLE(0b, 0b)
7472 +#endif
7473 +
7474 + "sete %1\n"
7475 : "+m" (l->a.counter), "=qm" (c)
7476 : "ir" (i) : "memory");
7477 return c;
7478 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7479 {
7480 unsigned char c;
7481
7482 - asm volatile(_ASM_DEC "%0; sete %1"
7483 + asm volatile(_ASM_DEC "%0\n"
7484 +
7485 +#ifdef CONFIG_PAX_REFCOUNT
7486 + "jno 0f\n"
7487 + _ASM_INC "%0\n"
7488 + "int $4\n0:\n"
7489 + _ASM_EXTABLE(0b, 0b)
7490 +#endif
7491 +
7492 + "sete %1\n"
7493 : "+m" (l->a.counter), "=qm" (c)
7494 : : "memory");
7495 return c != 0;
7496 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7497 {
7498 unsigned char c;
7499
7500 - asm volatile(_ASM_INC "%0; sete %1"
7501 + asm volatile(_ASM_INC "%0\n"
7502 +
7503 +#ifdef CONFIG_PAX_REFCOUNT
7504 + "jno 0f\n"
7505 + _ASM_DEC "%0\n"
7506 + "int $4\n0:\n"
7507 + _ASM_EXTABLE(0b, 0b)
7508 +#endif
7509 +
7510 + "sete %1\n"
7511 : "+m" (l->a.counter), "=qm" (c)
7512 : : "memory");
7513 return c != 0;
7514 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7515 {
7516 unsigned char c;
7517
7518 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7519 + asm volatile(_ASM_ADD "%2,%0\n"
7520 +
7521 +#ifdef CONFIG_PAX_REFCOUNT
7522 + "jno 0f\n"
7523 + _ASM_SUB "%2,%0\n"
7524 + "int $4\n0:\n"
7525 + _ASM_EXTABLE(0b, 0b)
7526 +#endif
7527 +
7528 + "sets %1\n"
7529 : "+m" (l->a.counter), "=qm" (c)
7530 : "ir" (i) : "memory");
7531 return c;
7532 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7533 #endif
7534 /* Modern 486+ processor */
7535 __i = i;
7536 - asm volatile(_ASM_XADD "%0, %1;"
7537 + asm volatile(_ASM_XADD "%0, %1\n"
7538 +
7539 +#ifdef CONFIG_PAX_REFCOUNT
7540 + "jno 0f\n"
7541 + _ASM_MOV "%0,%1\n"
7542 + "int $4\n0:\n"
7543 + _ASM_EXTABLE(0b, 0b)
7544 +#endif
7545 +
7546 : "+r" (i), "+m" (l->a.counter)
7547 : : "memory");
7548 return i + __i;
7549 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mman.h linux-2.6.39.4/arch/x86/include/asm/mman.h
7550 --- linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400
7551 +++ linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-08-05 19:44:33.000000000 -0400
7552 @@ -5,4 +5,14 @@
7553
7554 #include <asm-generic/mman.h>
7555
7556 +#ifdef __KERNEL__
7557 +#ifndef __ASSEMBLY__
7558 +#ifdef CONFIG_X86_32
7559 +#define arch_mmap_check i386_mmap_check
7560 +int i386_mmap_check(unsigned long addr, unsigned long len,
7561 + unsigned long flags);
7562 +#endif
7563 +#endif
7564 +#endif
7565 +
7566 #endif /* _ASM_X86_MMAN_H */
7567 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu_context.h linux-2.6.39.4/arch/x86/include/asm/mmu_context.h
7568 --- linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400
7569 +++ linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-08-17 19:42:21.000000000 -0400
7570 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7571
7572 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7573 {
7574 +
7575 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7576 + unsigned int i;
7577 + pgd_t *pgd;
7578 +
7579 + pax_open_kernel();
7580 + pgd = get_cpu_pgd(smp_processor_id());
7581 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7582 + if (paravirt_enabled())
7583 + set_pgd(pgd+i, native_make_pgd(0));
7584 + else
7585 + pgd[i] = native_make_pgd(0);
7586 + pax_close_kernel();
7587 +#endif
7588 +
7589 #ifdef CONFIG_SMP
7590 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7591 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7592 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
7593 struct task_struct *tsk)
7594 {
7595 unsigned cpu = smp_processor_id();
7596 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
7597 + int tlbstate = TLBSTATE_OK;
7598 +#endif
7599
7600 if (likely(prev != next)) {
7601 #ifdef CONFIG_SMP
7602 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7603 + tlbstate = percpu_read(cpu_tlbstate.state);
7604 +#endif
7605 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7606 percpu_write(cpu_tlbstate.active_mm, next);
7607 #endif
7608 cpumask_set_cpu(cpu, mm_cpumask(next));
7609
7610 /* Re-load page tables */
7611 +#ifdef CONFIG_PAX_PER_CPU_PGD
7612 + pax_open_kernel();
7613 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7614 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7615 + pax_close_kernel();
7616 + load_cr3(get_cpu_pgd(cpu));
7617 +#else
7618 load_cr3(next->pgd);
7619 +#endif
7620
7621 /* stop flush ipis for the previous mm */
7622 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7623 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
7624 */
7625 if (unlikely(prev->context.ldt != next->context.ldt))
7626 load_LDT_nolock(&next->context);
7627 - }
7628 +
7629 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7630 + if (!(__supported_pte_mask & _PAGE_NX)) {
7631 + smp_mb__before_clear_bit();
7632 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7633 + smp_mb__after_clear_bit();
7634 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7635 + }
7636 +#endif
7637 +
7638 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7639 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7640 + prev->context.user_cs_limit != next->context.user_cs_limit))
7641 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7642 #ifdef CONFIG_SMP
7643 + else if (unlikely(tlbstate != TLBSTATE_OK))
7644 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7645 +#endif
7646 +#endif
7647 +
7648 + }
7649 else {
7650 +
7651 +#ifdef CONFIG_PAX_PER_CPU_PGD
7652 + pax_open_kernel();
7653 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7654 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7655 + pax_close_kernel();
7656 + load_cr3(get_cpu_pgd(cpu));
7657 +#endif
7658 +
7659 +#ifdef CONFIG_SMP
7660 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7661 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7662
7663 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
7664 * tlb flush IPI delivery. We must reload CR3
7665 * to make sure to use no freed page tables.
7666 */
7667 +
7668 +#ifndef CONFIG_PAX_PER_CPU_PGD
7669 load_cr3(next->pgd);
7670 +#endif
7671 +
7672 load_LDT_nolock(&next->context);
7673 +
7674 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7675 + if (!(__supported_pte_mask & _PAGE_NX))
7676 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7677 +#endif
7678 +
7679 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7680 +#ifdef CONFIG_PAX_PAGEEXEC
7681 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7682 +#endif
7683 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7684 +#endif
7685 +
7686 }
7687 - }
7688 #endif
7689 + }
7690 }
7691
7692 #define activate_mm(prev, next) \
7693 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu.h linux-2.6.39.4/arch/x86/include/asm/mmu.h
7694 --- linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400
7695 +++ linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-08-05 19:44:33.000000000 -0400
7696 @@ -9,10 +9,22 @@
7697 * we put the segment information here.
7698 */
7699 typedef struct {
7700 - void *ldt;
7701 + struct desc_struct *ldt;
7702 int size;
7703 struct mutex lock;
7704 - void *vdso;
7705 + unsigned long vdso;
7706 +
7707 +#ifdef CONFIG_X86_32
7708 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7709 + unsigned long user_cs_base;
7710 + unsigned long user_cs_limit;
7711 +
7712 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7713 + cpumask_t cpu_user_cs_mask;
7714 +#endif
7715 +
7716 +#endif
7717 +#endif
7718
7719 #ifdef CONFIG_X86_64
7720 /* True if mm supports a task running in 32 bit compatibility mode. */
7721 diff -urNp linux-2.6.39.4/arch/x86/include/asm/module.h linux-2.6.39.4/arch/x86/include/asm/module.h
7722 --- linux-2.6.39.4/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400
7723 +++ linux-2.6.39.4/arch/x86/include/asm/module.h 2011-08-05 19:44:33.000000000 -0400
7724 @@ -5,6 +5,7 @@
7725
7726 #ifdef CONFIG_X86_64
7727 /* X86_64 does not define MODULE_PROC_FAMILY */
7728 +#define MODULE_PROC_FAMILY ""
7729 #elif defined CONFIG_M386
7730 #define MODULE_PROC_FAMILY "386 "
7731 #elif defined CONFIG_M486
7732 @@ -59,8 +60,30 @@
7733 #error unknown processor family
7734 #endif
7735
7736 -#ifdef CONFIG_X86_32
7737 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7739 +#define MODULE_PAX_UDEREF "UDEREF "
7740 +#else
7741 +#define MODULE_PAX_UDEREF ""
7742 +#endif
7743 +
7744 +#ifdef CONFIG_PAX_KERNEXEC
7745 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7746 +#else
7747 +#define MODULE_PAX_KERNEXEC ""
7748 #endif
7749
7750 +#ifdef CONFIG_PAX_REFCOUNT
7751 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7752 +#else
7753 +#define MODULE_PAX_REFCOUNT ""
7754 +#endif
7755 +
7756 +#ifdef CONFIG_GRKERNSEC
7757 +#define MODULE_GRSEC "GRSECURITY "
7758 +#else
7759 +#define MODULE_GRSEC ""
7760 +#endif
7761 +
7762 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7763 +
7764 #endif /* _ASM_X86_MODULE_H */
7765 diff -urNp linux-2.6.39.4/arch/x86/include/asm/page_64_types.h linux-2.6.39.4/arch/x86/include/asm/page_64_types.h
7766 --- linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400
7767 +++ linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-08-05 19:44:33.000000000 -0400
7768 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7769
7770 /* duplicated to the one in bootmem.h */
7771 extern unsigned long max_pfn;
7772 -extern unsigned long phys_base;
7773 +extern const unsigned long phys_base;
7774
7775 extern unsigned long __phys_addr(unsigned long);
7776 #define __phys_reloc_hide(x) (x)
7777 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt.h linux-2.6.39.4/arch/x86/include/asm/paravirt.h
7778 --- linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400
7779 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-08-05 19:44:33.000000000 -0400
7780 @@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned
7781 pv_mmu_ops.set_fixmap(idx, phys, flags);
7782 }
7783
7784 +#ifdef CONFIG_PAX_KERNEXEC
7785 +static inline unsigned long pax_open_kernel(void)
7786 +{
7787 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7788 +}
7789 +
7790 +static inline unsigned long pax_close_kernel(void)
7791 +{
7792 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7793 +}
7794 +#else
7795 +static inline unsigned long pax_open_kernel(void) { return 0; }
7796 +static inline unsigned long pax_close_kernel(void) { return 0; }
7797 +#endif
7798 +
7799 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7800
7801 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7802 @@ -955,7 +970,7 @@ extern void default_banner(void);
7803
7804 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7805 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7806 -#define PARA_INDIRECT(addr) *%cs:addr
7807 +#define PARA_INDIRECT(addr) *%ss:addr
7808 #endif
7809
7810 #define INTERRUPT_RETURN \
7811 @@ -1032,6 +1047,21 @@ extern void default_banner(void);
7812 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7813 CLBR_NONE, \
7814 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7815 +
7816 +#define GET_CR0_INTO_RDI \
7817 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7818 + mov %rax,%rdi
7819 +
7820 +#define SET_RDI_INTO_CR0 \
7821 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7822 +
7823 +#define GET_CR3_INTO_RDI \
7824 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7825 + mov %rax,%rdi
7826 +
7827 +#define SET_RDI_INTO_CR3 \
7828 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7829 +
7830 #endif /* CONFIG_X86_32 */
7831
7832 #endif /* __ASSEMBLY__ */
7833 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h
7834 --- linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400
7835 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:34:06.000000000 -0400
7836 @@ -78,19 +78,19 @@ struct pv_init_ops {
7837 */
7838 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7839 unsigned long addr, unsigned len);
7840 -};
7841 +} __no_const;
7842
7843
7844 struct pv_lazy_ops {
7845 /* Set deferred update mode, used for batching operations. */
7846 void (*enter)(void);
7847 void (*leave)(void);
7848 -};
7849 +} __no_const;
7850
7851 struct pv_time_ops {
7852 unsigned long long (*sched_clock)(void);
7853 unsigned long (*get_tsc_khz)(void);
7854 -};
7855 +} __no_const;
7856
7857 struct pv_cpu_ops {
7858 /* hooks for various privileged instructions */
7859 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7860
7861 void (*start_context_switch)(struct task_struct *prev);
7862 void (*end_context_switch)(struct task_struct *next);
7863 -};
7864 +} __no_const;
7865
7866 struct pv_irq_ops {
7867 /*
7868 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7869 unsigned long start_eip,
7870 unsigned long start_esp);
7871 #endif
7872 -};
7873 +} __no_const;
7874
7875 struct pv_mmu_ops {
7876 unsigned long (*read_cr2)(void);
7877 @@ -317,6 +317,12 @@ struct pv_mmu_ops {
7878 an mfn. We can tell which is which from the index. */
7879 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7880 phys_addr_t phys, pgprot_t flags);
7881 +
7882 +#ifdef CONFIG_PAX_KERNEXEC
7883 + unsigned long (*pax_open_kernel)(void);
7884 + unsigned long (*pax_close_kernel)(void);
7885 +#endif
7886 +
7887 };
7888
7889 struct arch_spinlock;
7890 @@ -327,7 +333,7 @@ struct pv_lock_ops {
7891 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7892 int (*spin_trylock)(struct arch_spinlock *lock);
7893 void (*spin_unlock)(struct arch_spinlock *lock);
7894 -};
7895 +} __no_const;
7896
7897 /* This contains all the paravirt structures: we get a convenient
7898 * number for each function using the offset which we use to indicate
7899 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgalloc.h linux-2.6.39.4/arch/x86/include/asm/pgalloc.h
7900 --- linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400
7901 +++ linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-08-05 19:44:33.000000000 -0400
7902 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7903 pmd_t *pmd, pte_t *pte)
7904 {
7905 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7906 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7907 +}
7908 +
7909 +static inline void pmd_populate_user(struct mm_struct *mm,
7910 + pmd_t *pmd, pte_t *pte)
7911 +{
7912 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7913 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7914 }
7915
7916 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h
7917 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400
7918 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-08-05 19:44:33.000000000 -0400
7919 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7920
7921 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7922 {
7923 + pax_open_kernel();
7924 *pmdp = pmd;
7925 + pax_close_kernel();
7926 }
7927
7928 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7929 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h
7930 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
7931 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
7932 @@ -25,9 +25,6 @@
7933 struct mm_struct;
7934 struct vm_area_struct;
7935
7936 -extern pgd_t swapper_pg_dir[1024];
7937 -extern pgd_t initial_page_table[1024];
7938 -
7939 static inline void pgtable_cache_init(void) { }
7940 static inline void check_pgt_cache(void) { }
7941 void paging_init(void);
7942 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7943 # include <asm/pgtable-2level.h>
7944 #endif
7945
7946 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7947 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7948 +#ifdef CONFIG_X86_PAE
7949 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7950 +#endif
7951 +
7952 #if defined(CONFIG_HIGHPTE)
7953 #define pte_offset_map(dir, address) \
7954 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7955 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7956 /* Clear a kernel PTE and flush it from the TLB */
7957 #define kpte_clear_flush(ptep, vaddr) \
7958 do { \
7959 + pax_open_kernel(); \
7960 pte_clear(&init_mm, (vaddr), (ptep)); \
7961 + pax_close_kernel(); \
7962 __flush_tlb_one((vaddr)); \
7963 } while (0)
7964
7965 @@ -74,6 +79,9 @@ do { \
7966
7967 #endif /* !__ASSEMBLY__ */
7968
7969 +#define HAVE_ARCH_UNMAPPED_AREA
7970 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7971 +
7972 /*
7973 * kern_addr_valid() is (1) for FLATMEM and (0) for
7974 * SPARSEMEM and DISCONTIGMEM
7975 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h
7976 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400
7977 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-05 19:44:33.000000000 -0400
7978 @@ -8,7 +8,7 @@
7979 */
7980 #ifdef CONFIG_X86_PAE
7981 # include <asm/pgtable-3level_types.h>
7982 -# define PMD_SIZE (1UL << PMD_SHIFT)
7983 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7984 # define PMD_MASK (~(PMD_SIZE - 1))
7985 #else
7986 # include <asm/pgtable-2level_types.h>
7987 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7988 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7989 #endif
7990
7991 +#ifdef CONFIG_PAX_KERNEXEC
7992 +#ifndef __ASSEMBLY__
7993 +extern unsigned char MODULES_EXEC_VADDR[];
7994 +extern unsigned char MODULES_EXEC_END[];
7995 +#endif
7996 +#include <asm/boot.h>
7997 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7998 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7999 +#else
8000 +#define ktla_ktva(addr) (addr)
8001 +#define ktva_ktla(addr) (addr)
8002 +#endif
8003 +
8004 #define MODULES_VADDR VMALLOC_START
8005 #define MODULES_END VMALLOC_END
8006 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8007 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h
8008 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400
8009 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-08-05 19:44:33.000000000 -0400
8010 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8011
8012 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8013 {
8014 + pax_open_kernel();
8015 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8016 + pax_close_kernel();
8017 }
8018
8019 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8020 {
8021 + pax_open_kernel();
8022 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8023 + pax_close_kernel();
8024 }
8025
8026 /*
8027 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h
8028 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400
8029 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-08-05 19:44:33.000000000 -0400
8030 @@ -16,10 +16,13 @@
8031
8032 extern pud_t level3_kernel_pgt[512];
8033 extern pud_t level3_ident_pgt[512];
8034 +extern pud_t level3_vmalloc_pgt[512];
8035 +extern pud_t level3_vmemmap_pgt[512];
8036 +extern pud_t level2_vmemmap_pgt[512];
8037 extern pmd_t level2_kernel_pgt[512];
8038 extern pmd_t level2_fixmap_pgt[512];
8039 -extern pmd_t level2_ident_pgt[512];
8040 -extern pgd_t init_level4_pgt[];
8041 +extern pmd_t level2_ident_pgt[512*2];
8042 +extern pgd_t init_level4_pgt[512];
8043
8044 #define swapper_pg_dir init_level4_pgt
8045
8046 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8047
8048 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8049 {
8050 + pax_open_kernel();
8051 *pmdp = pmd;
8052 + pax_close_kernel();
8053 }
8054
8055 static inline void native_pmd_clear(pmd_t *pmd)
8056 @@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_
8057
8058 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8059 {
8060 + pax_open_kernel();
8061 *pgdp = pgd;
8062 + pax_close_kernel();
8063 }
8064
8065 static inline void native_pgd_clear(pgd_t *pgd)
8066 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h
8067 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400
8068 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-05 19:44:33.000000000 -0400
8069 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8070 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8071 #define MODULES_END _AC(0xffffffffff000000, UL)
8072 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8073 +#define MODULES_EXEC_VADDR MODULES_VADDR
8074 +#define MODULES_EXEC_END MODULES_END
8075 +
8076 +#define ktla_ktva(addr) (addr)
8077 +#define ktva_ktla(addr) (addr)
8078
8079 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8080 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable.h linux-2.6.39.4/arch/x86/include/asm/pgtable.h
8081 --- linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
8082 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
8083 @@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm
8084
8085 #define arch_end_context_switch(prev) do {} while(0)
8086
8087 +#define pax_open_kernel() native_pax_open_kernel()
8088 +#define pax_close_kernel() native_pax_close_kernel()
8089 #endif /* CONFIG_PARAVIRT */
8090
8091 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8092 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8093 +
8094 +#ifdef CONFIG_PAX_KERNEXEC
8095 +static inline unsigned long native_pax_open_kernel(void)
8096 +{
8097 + unsigned long cr0;
8098 +
8099 + preempt_disable();
8100 + barrier();
8101 + cr0 = read_cr0() ^ X86_CR0_WP;
8102 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8103 + write_cr0(cr0);
8104 + return cr0 ^ X86_CR0_WP;
8105 +}
8106 +
8107 +static inline unsigned long native_pax_close_kernel(void)
8108 +{
8109 + unsigned long cr0;
8110 +
8111 + cr0 = read_cr0() ^ X86_CR0_WP;
8112 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8113 + write_cr0(cr0);
8114 + barrier();
8115 + preempt_enable_no_resched();
8116 + return cr0 ^ X86_CR0_WP;
8117 +}
8118 +#else
8119 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8120 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8121 +#endif
8122 +
8123 /*
8124 * The following only work if pte_present() is true.
8125 * Undefined behaviour if not..
8126 */
8127 +static inline int pte_user(pte_t pte)
8128 +{
8129 + return pte_val(pte) & _PAGE_USER;
8130 +}
8131 +
8132 static inline int pte_dirty(pte_t pte)
8133 {
8134 return pte_flags(pte) & _PAGE_DIRTY;
8135 @@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t
8136 return pte_clear_flags(pte, _PAGE_RW);
8137 }
8138
8139 +static inline pte_t pte_mkread(pte_t pte)
8140 +{
8141 + return __pte(pte_val(pte) | _PAGE_USER);
8142 +}
8143 +
8144 static inline pte_t pte_mkexec(pte_t pte)
8145 {
8146 - return pte_clear_flags(pte, _PAGE_NX);
8147 +#ifdef CONFIG_X86_PAE
8148 + if (__supported_pte_mask & _PAGE_NX)
8149 + return pte_clear_flags(pte, _PAGE_NX);
8150 + else
8151 +#endif
8152 + return pte_set_flags(pte, _PAGE_USER);
8153 +}
8154 +
8155 +static inline pte_t pte_exprotect(pte_t pte)
8156 +{
8157 +#ifdef CONFIG_X86_PAE
8158 + if (__supported_pte_mask & _PAGE_NX)
8159 + return pte_set_flags(pte, _PAGE_NX);
8160 + else
8161 +#endif
8162 + return pte_clear_flags(pte, _PAGE_USER);
8163 }
8164
8165 static inline pte_t pte_mkdirty(pte_t pte)
8166 @@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long
8167 #endif
8168
8169 #ifndef __ASSEMBLY__
8170 +
8171 +#ifdef CONFIG_PAX_PER_CPU_PGD
8172 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8173 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8174 +{
8175 + return cpu_pgd[cpu];
8176 +}
8177 +#endif
8178 +
8179 #include <linux/mm_types.h>
8180
8181 static inline int pte_none(pte_t pte)
8182 @@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p
8183
8184 static inline int pgd_bad(pgd_t pgd)
8185 {
8186 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8187 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8188 }
8189
8190 static inline int pgd_none(pgd_t pgd)
8191 @@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd)
8192 * pgd_offset() returns a (pgd_t *)
8193 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8194 */
8195 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8196 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8197 +
8198 +#ifdef CONFIG_PAX_PER_CPU_PGD
8199 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8200 +#endif
8201 +
8202 /*
8203 * a shortcut which implies the use of the kernel's pgd, instead
8204 * of a process's
8205 @@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd)
8206 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8207 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8208
8209 +#ifdef CONFIG_X86_32
8210 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8211 +#else
8212 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8213 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8214 +
8215 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8216 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8217 +#else
8218 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8219 +#endif
8220 +
8221 +#endif
8222 +
8223 #ifndef __ASSEMBLY__
8224
8225 extern int direct_gbpages;
8226 @@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st
8227 * dst and src can be on the same page, but the range must not overlap,
8228 * and must not cross a page boundary.
8229 */
8230 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8231 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8232 {
8233 - memcpy(dst, src, count * sizeof(pgd_t));
8234 + pax_open_kernel();
8235 + while (count--)
8236 + *dst++ = *src++;
8237 + pax_close_kernel();
8238 }
8239
8240 +#ifdef CONFIG_PAX_PER_CPU_PGD
8241 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8242 +#endif
8243 +
8244 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8245 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8246 +#else
8247 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8248 +#endif
8249
8250 #include <asm-generic/pgtable.h>
8251 #endif /* __ASSEMBLY__ */
8252 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h
8253 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400
8254 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-08-05 19:44:33.000000000 -0400
8255 @@ -16,13 +16,12 @@
8256 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8257 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8258 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8259 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8260 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8261 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8262 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8263 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8264 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8265 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8266 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8267 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8268 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8269 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8270
8271 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8272 @@ -40,7 +39,6 @@
8273 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8274 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8275 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8276 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8277 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8278 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8279 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8280 @@ -57,8 +55,10 @@
8281
8282 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8283 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8284 -#else
8285 +#elif defined(CONFIG_KMEMCHECK)
8286 #define _PAGE_NX (_AT(pteval_t, 0))
8287 +#else
8288 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8289 #endif
8290
8291 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8292 @@ -96,6 +96,9 @@
8293 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8294 _PAGE_ACCESSED)
8295
8296 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8297 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8298 +
8299 #define __PAGE_KERNEL_EXEC \
8300 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8301 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8302 @@ -106,8 +109,8 @@
8303 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8304 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8305 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8306 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8307 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8308 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8309 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8310 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8311 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8312 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8313 @@ -166,8 +169,8 @@
8314 * bits are combined, this will alow user to access the high address mapped
8315 * VDSO in the presence of CONFIG_COMPAT_VDSO
8316 */
8317 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8318 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8319 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8320 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8321 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8322 #endif
8323
8324 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8325 {
8326 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8327 }
8328 +#endif
8329
8330 +#if PAGETABLE_LEVELS == 3
8331 +#include <asm-generic/pgtable-nopud.h>
8332 +#endif
8333 +
8334 +#if PAGETABLE_LEVELS == 2
8335 +#include <asm-generic/pgtable-nopmd.h>
8336 +#endif
8337 +
8338 +#ifndef __ASSEMBLY__
8339 #if PAGETABLE_LEVELS > 3
8340 typedef struct { pudval_t pud; } pud_t;
8341
8342 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8343 return pud.pud;
8344 }
8345 #else
8346 -#include <asm-generic/pgtable-nopud.h>
8347 -
8348 static inline pudval_t native_pud_val(pud_t pud)
8349 {
8350 return native_pgd_val(pud.pgd);
8351 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8352 return pmd.pmd;
8353 }
8354 #else
8355 -#include <asm-generic/pgtable-nopmd.h>
8356 -
8357 static inline pmdval_t native_pmd_val(pmd_t pmd)
8358 {
8359 return native_pgd_val(pmd.pud.pgd);
8360 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8361
8362 extern pteval_t __supported_pte_mask;
8363 extern void set_nx(void);
8364 -extern int nx_enabled;
8365
8366 #define pgprot_writecombine pgprot_writecombine
8367 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8368 diff -urNp linux-2.6.39.4/arch/x86/include/asm/processor.h linux-2.6.39.4/arch/x86/include/asm/processor.h
8369 --- linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400
8370 +++ linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-08-05 19:44:33.000000000 -0400
8371 @@ -266,7 +266,7 @@ struct tss_struct {
8372
8373 } ____cacheline_aligned;
8374
8375 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8376 +extern struct tss_struct init_tss[NR_CPUS];
8377
8378 /*
8379 * Save the original ist values for checking stack pointers during debugging
8380 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8381 */
8382 #define TASK_SIZE PAGE_OFFSET
8383 #define TASK_SIZE_MAX TASK_SIZE
8384 +
8385 +#ifdef CONFIG_PAX_SEGMEXEC
8386 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8387 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8388 +#else
8389 #define STACK_TOP TASK_SIZE
8390 -#define STACK_TOP_MAX STACK_TOP
8391 +#endif
8392 +
8393 +#define STACK_TOP_MAX TASK_SIZE
8394
8395 #define INIT_THREAD { \
8396 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8397 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8398 .vm86_info = NULL, \
8399 .sysenter_cs = __KERNEL_CS, \
8400 .io_bitmap_ptr = NULL, \
8401 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8402 */
8403 #define INIT_TSS { \
8404 .x86_tss = { \
8405 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8406 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8407 .ss0 = __KERNEL_DS, \
8408 .ss1 = __KERNEL_CS, \
8409 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8410 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8411 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8412
8413 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8414 -#define KSTK_TOP(info) \
8415 -({ \
8416 - unsigned long *__ptr = (unsigned long *)(info); \
8417 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8418 -})
8419 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8420
8421 /*
8422 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8423 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8424 #define task_pt_regs(task) \
8425 ({ \
8426 struct pt_regs *__regs__; \
8427 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8428 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8429 __regs__ - 1; \
8430 })
8431
8432 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8433 /*
8434 * User space process size. 47bits minus one guard page.
8435 */
8436 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8437 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8438
8439 /* This decides where the kernel will search for a free chunk of vm
8440 * space during mmap's.
8441 */
8442 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8443 - 0xc0000000 : 0xFFFFe000)
8444 + 0xc0000000 : 0xFFFFf000)
8445
8446 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8447 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8448 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8449 #define STACK_TOP_MAX TASK_SIZE_MAX
8450
8451 #define INIT_THREAD { \
8452 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8453 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8454 }
8455
8456 #define INIT_TSS { \
8457 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8458 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8459 }
8460
8461 /*
8462 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8463 */
8464 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8465
8466 +#ifdef CONFIG_PAX_SEGMEXEC
8467 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8468 +#endif
8469 +
8470 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8471
8472 /* Get/set a process' ability to use the timestamp counter instruction */
8473 diff -urNp linux-2.6.39.4/arch/x86/include/asm/ptrace.h linux-2.6.39.4/arch/x86/include/asm/ptrace.h
8474 --- linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400
8475 +++ linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-08-05 19:44:33.000000000 -0400
8476 @@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8477 }
8478
8479 /*
8480 - * user_mode_vm(regs) determines whether a register set came from user mode.
8481 + * user_mode(regs) determines whether a register set came from user mode.
8482 * This is true if V8086 mode was enabled OR if the register set was from
8483 * protected mode with RPL-3 CS value. This tricky test checks that with
8484 * one comparison. Many places in the kernel can bypass this full check
8485 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8486 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8487 + * be used.
8488 */
8489 -static inline int user_mode(struct pt_regs *regs)
8490 +static inline int user_mode_novm(struct pt_regs *regs)
8491 {
8492 #ifdef CONFIG_X86_32
8493 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8494 #else
8495 - return !!(regs->cs & 3);
8496 + return !!(regs->cs & SEGMENT_RPL_MASK);
8497 #endif
8498 }
8499
8500 -static inline int user_mode_vm(struct pt_regs *regs)
8501 +static inline int user_mode(struct pt_regs *regs)
8502 {
8503 #ifdef CONFIG_X86_32
8504 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8505 USER_RPL;
8506 #else
8507 - return user_mode(regs);
8508 + return user_mode_novm(regs);
8509 #endif
8510 }
8511
8512 diff -urNp linux-2.6.39.4/arch/x86/include/asm/reboot.h linux-2.6.39.4/arch/x86/include/asm/reboot.h
8513 --- linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400
8514 +++ linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-08-05 20:34:06.000000000 -0400
8515 @@ -6,19 +6,19 @@
8516 struct pt_regs;
8517
8518 struct machine_ops {
8519 - void (*restart)(char *cmd);
8520 - void (*halt)(void);
8521 - void (*power_off)(void);
8522 + void (* __noreturn restart)(char *cmd);
8523 + void (* __noreturn halt)(void);
8524 + void (* __noreturn power_off)(void);
8525 void (*shutdown)(void);
8526 void (*crash_shutdown)(struct pt_regs *);
8527 - void (*emergency_restart)(void);
8528 -};
8529 + void (* __noreturn emergency_restart)(void);
8530 +} __no_const;
8531
8532 extern struct machine_ops machine_ops;
8533
8534 void native_machine_crash_shutdown(struct pt_regs *regs);
8535 void native_machine_shutdown(void);
8536 -void machine_real_restart(unsigned int type);
8537 +void machine_real_restart(unsigned int type) __noreturn;
8538 /* These must match dispatch_table in reboot_32.S */
8539 #define MRR_BIOS 0
8540 #define MRR_APM 1
8541 diff -urNp linux-2.6.39.4/arch/x86/include/asm/rwsem.h linux-2.6.39.4/arch/x86/include/asm/rwsem.h
8542 --- linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400
8543 +++ linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-08-05 19:44:33.000000000 -0400
8544 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8545 {
8546 asm volatile("# beginning down_read\n\t"
8547 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8548 +
8549 +#ifdef CONFIG_PAX_REFCOUNT
8550 + "jno 0f\n"
8551 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8552 + "int $4\n0:\n"
8553 + _ASM_EXTABLE(0b, 0b)
8554 +#endif
8555 +
8556 /* adds 0x00000001 */
8557 " jns 1f\n"
8558 " call call_rwsem_down_read_failed\n"
8559 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8560 "1:\n\t"
8561 " mov %1,%2\n\t"
8562 " add %3,%2\n\t"
8563 +
8564 +#ifdef CONFIG_PAX_REFCOUNT
8565 + "jno 0f\n"
8566 + "sub %3,%2\n"
8567 + "int $4\n0:\n"
8568 + _ASM_EXTABLE(0b, 0b)
8569 +#endif
8570 +
8571 " jle 2f\n\t"
8572 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8573 " jnz 1b\n\t"
8574 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8575 long tmp;
8576 asm volatile("# beginning down_write\n\t"
8577 LOCK_PREFIX " xadd %1,(%2)\n\t"
8578 +
8579 +#ifdef CONFIG_PAX_REFCOUNT
8580 + "jno 0f\n"
8581 + "mov %1,(%2)\n"
8582 + "int $4\n0:\n"
8583 + _ASM_EXTABLE(0b, 0b)
8584 +#endif
8585 +
8586 /* adds 0xffff0001, returns the old value */
8587 " test %1,%1\n\t"
8588 /* was the count 0 before? */
8589 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8590 long tmp;
8591 asm volatile("# beginning __up_read\n\t"
8592 LOCK_PREFIX " xadd %1,(%2)\n\t"
8593 +
8594 +#ifdef CONFIG_PAX_REFCOUNT
8595 + "jno 0f\n"
8596 + "mov %1,(%2)\n"
8597 + "int $4\n0:\n"
8598 + _ASM_EXTABLE(0b, 0b)
8599 +#endif
8600 +
8601 /* subtracts 1, returns the old value */
8602 " jns 1f\n\t"
8603 " call call_rwsem_wake\n" /* expects old value in %edx */
8604 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8605 long tmp;
8606 asm volatile("# beginning __up_write\n\t"
8607 LOCK_PREFIX " xadd %1,(%2)\n\t"
8608 +
8609 +#ifdef CONFIG_PAX_REFCOUNT
8610 + "jno 0f\n"
8611 + "mov %1,(%2)\n"
8612 + "int $4\n0:\n"
8613 + _ASM_EXTABLE(0b, 0b)
8614 +#endif
8615 +
8616 /* subtracts 0xffff0001, returns the old value */
8617 " jns 1f\n\t"
8618 " call call_rwsem_wake\n" /* expects old value in %edx */
8619 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8620 {
8621 asm volatile("# beginning __downgrade_write\n\t"
8622 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8623 +
8624 +#ifdef CONFIG_PAX_REFCOUNT
8625 + "jno 0f\n"
8626 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8627 + "int $4\n0:\n"
8628 + _ASM_EXTABLE(0b, 0b)
8629 +#endif
8630 +
8631 /*
8632 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8633 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8634 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8635 */
8636 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8637 {
8638 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8639 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8640 +
8641 +#ifdef CONFIG_PAX_REFCOUNT
8642 + "jno 0f\n"
8643 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8644 + "int $4\n0:\n"
8645 + _ASM_EXTABLE(0b, 0b)
8646 +#endif
8647 +
8648 : "+m" (sem->count)
8649 : "er" (delta));
8650 }
8651 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8652 {
8653 long tmp = delta;
8654
8655 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8656 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8657 +
8658 +#ifdef CONFIG_PAX_REFCOUNT
8659 + "jno 0f\n"
8660 + "mov %0,%1\n"
8661 + "int $4\n0:\n"
8662 + _ASM_EXTABLE(0b, 0b)
8663 +#endif
8664 +
8665 : "+r" (tmp), "+m" (sem->count)
8666 : : "memory");
8667
8668 diff -urNp linux-2.6.39.4/arch/x86/include/asm/segment.h linux-2.6.39.4/arch/x86/include/asm/segment.h
8669 --- linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400
8670 +++ linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-08-05 19:44:33.000000000 -0400
8671 @@ -64,8 +64,8 @@
8672 * 26 - ESPFIX small SS
8673 * 27 - per-cpu [ offset to per-cpu data area ]
8674 * 28 - stack_canary-20 [ for stack protector ]
8675 - * 29 - unused
8676 - * 30 - unused
8677 + * 29 - PCI BIOS CS
8678 + * 30 - PCI BIOS DS
8679 * 31 - TSS for double fault handler
8680 */
8681 #define GDT_ENTRY_TLS_MIN 6
8682 @@ -79,6 +79,8 @@
8683
8684 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8685
8686 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8687 +
8688 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8689
8690 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8691 @@ -104,6 +106,12 @@
8692 #define __KERNEL_STACK_CANARY 0
8693 #endif
8694
8695 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8696 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8697 +
8698 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8699 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8700 +
8701 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8702
8703 /*
8704 @@ -141,7 +149,7 @@
8705 */
8706
8707 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8708 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8709 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8710
8711
8712 #else
8713 @@ -165,6 +173,8 @@
8714 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8715 #define __USER32_DS __USER_DS
8716
8717 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8718 +
8719 #define GDT_ENTRY_TSS 8 /* needs two entries */
8720 #define GDT_ENTRY_LDT 10 /* needs two entries */
8721 #define GDT_ENTRY_TLS_MIN 12
8722 @@ -185,6 +195,7 @@
8723 #endif
8724
8725 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8726 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8727 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8728 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8729 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8730 diff -urNp linux-2.6.39.4/arch/x86/include/asm/smp.h linux-2.6.39.4/arch/x86/include/asm/smp.h
8731 --- linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400
8732 +++ linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-08-05 20:34:06.000000000 -0400
8733 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8734 /* cpus sharing the last level cache: */
8735 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8736 DECLARE_PER_CPU(u16, cpu_llc_id);
8737 -DECLARE_PER_CPU(int, cpu_number);
8738 +DECLARE_PER_CPU(unsigned int, cpu_number);
8739
8740 static inline struct cpumask *cpu_sibling_mask(int cpu)
8741 {
8742 @@ -77,7 +77,7 @@ struct smp_ops {
8743
8744 void (*send_call_func_ipi)(const struct cpumask *mask);
8745 void (*send_call_func_single_ipi)(int cpu);
8746 -};
8747 +} __no_const;
8748
8749 /* Globals due to paravirt */
8750 extern void set_cpu_sibling_map(int cpu);
8751 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8752 extern int safe_smp_processor_id(void);
8753
8754 #elif defined(CONFIG_X86_64_SMP)
8755 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8756 -
8757 -#define stack_smp_processor_id() \
8758 -({ \
8759 - struct thread_info *ti; \
8760 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8761 - ti->cpu; \
8762 -})
8763 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8764 +#define stack_smp_processor_id() raw_smp_processor_id()
8765 #define safe_smp_processor_id() smp_processor_id()
8766
8767 #endif
8768 diff -urNp linux-2.6.39.4/arch/x86/include/asm/spinlock.h linux-2.6.39.4/arch/x86/include/asm/spinlock.h
8769 --- linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
8770 +++ linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
8771 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8772 static inline void arch_read_lock(arch_rwlock_t *rw)
8773 {
8774 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8775 +
8776 +#ifdef CONFIG_PAX_REFCOUNT
8777 + "jno 0f\n"
8778 + LOCK_PREFIX " addl $1,(%0)\n"
8779 + "int $4\n0:\n"
8780 + _ASM_EXTABLE(0b, 0b)
8781 +#endif
8782 +
8783 "jns 1f\n"
8784 "call __read_lock_failed\n\t"
8785 "1:\n"
8786 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8787 static inline void arch_write_lock(arch_rwlock_t *rw)
8788 {
8789 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8790 +
8791 +#ifdef CONFIG_PAX_REFCOUNT
8792 + "jno 0f\n"
8793 + LOCK_PREFIX " addl %1,(%0)\n"
8794 + "int $4\n0:\n"
8795 + _ASM_EXTABLE(0b, 0b)
8796 +#endif
8797 +
8798 "jz 1f\n"
8799 "call __write_lock_failed\n\t"
8800 "1:\n"
8801 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8802
8803 static inline void arch_read_unlock(arch_rwlock_t *rw)
8804 {
8805 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8806 + asm volatile(LOCK_PREFIX "incl %0\n"
8807 +
8808 +#ifdef CONFIG_PAX_REFCOUNT
8809 + "jno 0f\n"
8810 + LOCK_PREFIX "decl %0\n"
8811 + "int $4\n0:\n"
8812 + _ASM_EXTABLE(0b, 0b)
8813 +#endif
8814 +
8815 + :"+m" (rw->lock) : : "memory");
8816 }
8817
8818 static inline void arch_write_unlock(arch_rwlock_t *rw)
8819 {
8820 - asm volatile(LOCK_PREFIX "addl %1, %0"
8821 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8822 +
8823 +#ifdef CONFIG_PAX_REFCOUNT
8824 + "jno 0f\n"
8825 + LOCK_PREFIX "subl %1, %0\n"
8826 + "int $4\n0:\n"
8827 + _ASM_EXTABLE(0b, 0b)
8828 +#endif
8829 +
8830 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8831 }
8832
8833 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stackprotector.h linux-2.6.39.4/arch/x86/include/asm/stackprotector.h
8834 --- linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400
8835 +++ linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-08-05 19:44:33.000000000 -0400
8836 @@ -48,7 +48,7 @@
8837 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8838 */
8839 #define GDT_STACK_CANARY_INIT \
8840 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8841 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8842
8843 /*
8844 * Initialize the stackprotector canary value.
8845 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8846
8847 static inline void load_stack_canary_segment(void)
8848 {
8849 -#ifdef CONFIG_X86_32
8850 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8851 asm volatile ("mov %0, %%gs" : : "r" (0));
8852 #endif
8853 }
8854 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stacktrace.h linux-2.6.39.4/arch/x86/include/asm/stacktrace.h
8855 --- linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400
8856 +++ linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-08-05 19:44:33.000000000 -0400
8857 @@ -11,28 +11,20 @@
8858
8859 extern int kstack_depth_to_print;
8860
8861 -struct thread_info;
8862 +struct task_struct;
8863 struct stacktrace_ops;
8864
8865 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8866 - unsigned long *stack,
8867 - unsigned long bp,
8868 - const struct stacktrace_ops *ops,
8869 - void *data,
8870 - unsigned long *end,
8871 - int *graph);
8872 -
8873 -extern unsigned long
8874 -print_context_stack(struct thread_info *tinfo,
8875 - unsigned long *stack, unsigned long bp,
8876 - const struct stacktrace_ops *ops, void *data,
8877 - unsigned long *end, int *graph);
8878 -
8879 -extern unsigned long
8880 -print_context_stack_bp(struct thread_info *tinfo,
8881 - unsigned long *stack, unsigned long bp,
8882 - const struct stacktrace_ops *ops, void *data,
8883 - unsigned long *end, int *graph);
8884 +typedef unsigned long walk_stack_t(struct task_struct *task,
8885 + void *stack_start,
8886 + unsigned long *stack,
8887 + unsigned long bp,
8888 + const struct stacktrace_ops *ops,
8889 + void *data,
8890 + unsigned long *end,
8891 + int *graph);
8892 +
8893 +extern walk_stack_t print_context_stack;
8894 +extern walk_stack_t print_context_stack_bp;
8895
8896 /* Generic stack tracer with callbacks */
8897
8898 @@ -43,7 +35,7 @@ struct stacktrace_ops {
8899 void (*address)(void *data, unsigned long address, int reliable);
8900 /* On negative return stop dumping */
8901 int (*stack)(void *data, char *name);
8902 - walk_stack_t walk_stack;
8903 + walk_stack_t *walk_stack;
8904 };
8905
8906 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8907 diff -urNp linux-2.6.39.4/arch/x86/include/asm/system.h linux-2.6.39.4/arch/x86/include/asm/system.h
8908 --- linux-2.6.39.4/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
8909 +++ linux-2.6.39.4/arch/x86/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
8910 @@ -129,7 +129,7 @@ do { \
8911 "call __switch_to\n\t" \
8912 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8913 __switch_canary \
8914 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8915 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8916 "movq %%rax,%%rdi\n\t" \
8917 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8918 "jnz ret_from_fork\n\t" \
8919 @@ -140,7 +140,7 @@ do { \
8920 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8921 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8922 [_tif_fork] "i" (_TIF_FORK), \
8923 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8924 + [thread_info] "m" (current_tinfo), \
8925 [current_task] "m" (current_task) \
8926 __switch_canary_iparam \
8927 : "memory", "cc" __EXTRA_CLOBBER)
8928 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8929 {
8930 unsigned long __limit;
8931 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8932 - return __limit + 1;
8933 + return __limit;
8934 }
8935
8936 static inline void native_clts(void)
8937 @@ -340,12 +340,12 @@ void enable_hlt(void);
8938
8939 void cpu_idle_wait(void);
8940
8941 -extern unsigned long arch_align_stack(unsigned long sp);
8942 +#define arch_align_stack(x) ((x) & ~0xfUL)
8943 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8944
8945 void default_idle(void);
8946
8947 -void stop_this_cpu(void *dummy);
8948 +void stop_this_cpu(void *dummy) __noreturn;
8949
8950 /*
8951 * Force strict CPU ordering.
8952 diff -urNp linux-2.6.39.4/arch/x86/include/asm/thread_info.h linux-2.6.39.4/arch/x86/include/asm/thread_info.h
8953 --- linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400
8954 +++ linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-08-05 19:44:33.000000000 -0400
8955 @@ -10,6 +10,7 @@
8956 #include <linux/compiler.h>
8957 #include <asm/page.h>
8958 #include <asm/types.h>
8959 +#include <asm/percpu.h>
8960
8961 /*
8962 * low level task data that entry.S needs immediate access to
8963 @@ -24,7 +25,6 @@ struct exec_domain;
8964 #include <asm/atomic.h>
8965
8966 struct thread_info {
8967 - struct task_struct *task; /* main task structure */
8968 struct exec_domain *exec_domain; /* execution domain */
8969 __u32 flags; /* low level flags */
8970 __u32 status; /* thread synchronous flags */
8971 @@ -34,18 +34,12 @@ struct thread_info {
8972 mm_segment_t addr_limit;
8973 struct restart_block restart_block;
8974 void __user *sysenter_return;
8975 -#ifdef CONFIG_X86_32
8976 - unsigned long previous_esp; /* ESP of the previous stack in
8977 - case of nested (IRQ) stacks
8978 - */
8979 - __u8 supervisor_stack[0];
8980 -#endif
8981 + unsigned long lowest_stack;
8982 int uaccess_err;
8983 };
8984
8985 -#define INIT_THREAD_INFO(tsk) \
8986 +#define INIT_THREAD_INFO \
8987 { \
8988 - .task = &tsk, \
8989 .exec_domain = &default_exec_domain, \
8990 .flags = 0, \
8991 .cpu = 0, \
8992 @@ -56,7 +50,7 @@ struct thread_info {
8993 }, \
8994 }
8995
8996 -#define init_thread_info (init_thread_union.thread_info)
8997 +#define init_thread_info (init_thread_union.stack)
8998 #define init_stack (init_thread_union.stack)
8999
9000 #else /* !__ASSEMBLY__ */
9001 @@ -170,6 +164,23 @@ struct thread_info {
9002 ret; \
9003 })
9004
9005 +#ifdef __ASSEMBLY__
9006 +/* how to get the thread information struct from ASM */
9007 +#define GET_THREAD_INFO(reg) \
9008 + mov PER_CPU_VAR(current_tinfo), reg
9009 +
9010 +/* use this one if reg already contains %esp */
9011 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9012 +#else
9013 +/* how to get the thread information struct from C */
9014 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9015 +
9016 +static __always_inline struct thread_info *current_thread_info(void)
9017 +{
9018 + return percpu_read_stable(current_tinfo);
9019 +}
9020 +#endif
9021 +
9022 #ifdef CONFIG_X86_32
9023
9024 #define STACK_WARN (THREAD_SIZE/8)
9025 @@ -180,35 +191,13 @@ struct thread_info {
9026 */
9027 #ifndef __ASSEMBLY__
9028
9029 -
9030 /* how to get the current stack pointer from C */
9031 register unsigned long current_stack_pointer asm("esp") __used;
9032
9033 -/* how to get the thread information struct from C */
9034 -static inline struct thread_info *current_thread_info(void)
9035 -{
9036 - return (struct thread_info *)
9037 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9038 -}
9039 -
9040 -#else /* !__ASSEMBLY__ */
9041 -
9042 -/* how to get the thread information struct from ASM */
9043 -#define GET_THREAD_INFO(reg) \
9044 - movl $-THREAD_SIZE, reg; \
9045 - andl %esp, reg
9046 -
9047 -/* use this one if reg already contains %esp */
9048 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9049 - andl $-THREAD_SIZE, reg
9050 -
9051 #endif
9052
9053 #else /* X86_32 */
9054
9055 -#include <asm/percpu.h>
9056 -#define KERNEL_STACK_OFFSET (5*8)
9057 -
9058 /*
9059 * macros/functions for gaining access to the thread information structure
9060 * preempt_count needs to be 1 initially, until the scheduler is functional.
9061 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9062 #ifndef __ASSEMBLY__
9063 DECLARE_PER_CPU(unsigned long, kernel_stack);
9064
9065 -static inline struct thread_info *current_thread_info(void)
9066 -{
9067 - struct thread_info *ti;
9068 - ti = (void *)(percpu_read_stable(kernel_stack) +
9069 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9070 - return ti;
9071 -}
9072 -
9073 -#else /* !__ASSEMBLY__ */
9074 -
9075 -/* how to get the thread information struct from ASM */
9076 -#define GET_THREAD_INFO(reg) \
9077 - movq PER_CPU_VAR(kernel_stack),reg ; \
9078 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9079 -
9080 +/* how to get the current stack pointer from C */
9081 +register unsigned long current_stack_pointer asm("rsp") __used;
9082 #endif
9083
9084 #endif /* !X86_32 */
9085 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9086 extern void free_thread_info(struct thread_info *ti);
9087 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9088 #define arch_task_cache_init arch_task_cache_init
9089 +
9090 +#define __HAVE_THREAD_FUNCTIONS
9091 +#define task_thread_info(task) (&(task)->tinfo)
9092 +#define task_stack_page(task) ((task)->stack)
9093 +#define setup_thread_stack(p, org) do {} while (0)
9094 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9095 +
9096 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9097 +extern struct task_struct *alloc_task_struct_node(int node);
9098 +extern void free_task_struct(struct task_struct *);
9099 +
9100 #endif
9101 #endif /* _ASM_X86_THREAD_INFO_H */
9102 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h
9103 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
9104 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
9105 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
9106 static __always_inline unsigned long __must_check
9107 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9108 {
9109 + pax_track_stack();
9110 +
9111 + if ((long)n < 0)
9112 + return n;
9113 +
9114 if (__builtin_constant_p(n)) {
9115 unsigned long ret;
9116
9117 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
9118 return ret;
9119 }
9120 }
9121 + if (!__builtin_constant_p(n))
9122 + check_object_size(from, n, true);
9123 return __copy_to_user_ll(to, from, n);
9124 }
9125
9126 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
9127 __copy_to_user(void __user *to, const void *from, unsigned long n)
9128 {
9129 might_fault();
9130 +
9131 return __copy_to_user_inatomic(to, from, n);
9132 }
9133
9134 static __always_inline unsigned long
9135 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9136 {
9137 + if ((long)n < 0)
9138 + return n;
9139 +
9140 /* Avoid zeroing the tail if the copy fails..
9141 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9142 * but as the zeroing behaviour is only significant when n is not
9143 @@ -138,6 +149,12 @@ static __always_inline unsigned long
9144 __copy_from_user(void *to, const void __user *from, unsigned long n)
9145 {
9146 might_fault();
9147 +
9148 + pax_track_stack();
9149 +
9150 + if ((long)n < 0)
9151 + return n;
9152 +
9153 if (__builtin_constant_p(n)) {
9154 unsigned long ret;
9155
9156 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
9157 return ret;
9158 }
9159 }
9160 + if (!__builtin_constant_p(n))
9161 + check_object_size(to, n, false);
9162 return __copy_from_user_ll(to, from, n);
9163 }
9164
9165 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
9166 const void __user *from, unsigned long n)
9167 {
9168 might_fault();
9169 +
9170 + if ((long)n < 0)
9171 + return n;
9172 +
9173 if (__builtin_constant_p(n)) {
9174 unsigned long ret;
9175
9176 @@ -182,15 +205,19 @@ static __always_inline unsigned long
9177 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9178 unsigned long n)
9179 {
9180 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9181 -}
9182 + if ((long)n < 0)
9183 + return n;
9184
9185 -unsigned long __must_check copy_to_user(void __user *to,
9186 - const void *from, unsigned long n);
9187 -unsigned long __must_check _copy_from_user(void *to,
9188 - const void __user *from,
9189 - unsigned long n);
9190 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9191 +}
9192
9193 +extern void copy_to_user_overflow(void)
9194 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9195 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9196 +#else
9197 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9198 +#endif
9199 +;
9200
9201 extern void copy_from_user_overflow(void)
9202 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9203 @@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void
9204 #endif
9205 ;
9206
9207 -static inline unsigned long __must_check copy_from_user(void *to,
9208 - const void __user *from,
9209 - unsigned long n)
9210 +/**
9211 + * copy_to_user: - Copy a block of data into user space.
9212 + * @to: Destination address, in user space.
9213 + * @from: Source address, in kernel space.
9214 + * @n: Number of bytes to copy.
9215 + *
9216 + * Context: User context only. This function may sleep.
9217 + *
9218 + * Copy data from kernel space to user space.
9219 + *
9220 + * Returns number of bytes that could not be copied.
9221 + * On success, this will be zero.
9222 + */
9223 +static inline unsigned long __must_check
9224 +copy_to_user(void __user *to, const void *from, unsigned long n)
9225 +{
9226 + int sz = __compiletime_object_size(from);
9227 +
9228 + if (unlikely(sz != -1 && sz < n))
9229 + copy_to_user_overflow();
9230 + else if (access_ok(VERIFY_WRITE, to, n))
9231 + n = __copy_to_user(to, from, n);
9232 + return n;
9233 +}
9234 +
9235 +/**
9236 + * copy_from_user: - Copy a block of data from user space.
9237 + * @to: Destination address, in kernel space.
9238 + * @from: Source address, in user space.
9239 + * @n: Number of bytes to copy.
9240 + *
9241 + * Context: User context only. This function may sleep.
9242 + *
9243 + * Copy data from user space to kernel space.
9244 + *
9245 + * Returns number of bytes that could not be copied.
9246 + * On success, this will be zero.
9247 + *
9248 + * If some data could not be copied, this function will pad the copied
9249 + * data to the requested size using zero bytes.
9250 + */
9251 +static inline unsigned long __must_check
9252 +copy_from_user(void *to, const void __user *from, unsigned long n)
9253 {
9254 int sz = __compiletime_object_size(to);
9255
9256 - if (likely(sz == -1 || sz >= n))
9257 - n = _copy_from_user(to, from, n);
9258 - else
9259 + if (unlikely(sz != -1 && sz < n))
9260 copy_from_user_overflow();
9261 -
9262 + else if (access_ok(VERIFY_READ, from, n))
9263 + n = __copy_from_user(to, from, n);
9264 + else if ((long)n > 0) {
9265 + if (!__builtin_constant_p(n))
9266 + check_object_size(to, n, false);
9267 + memset(to, 0, n);
9268 + }
9269 return n;
9270 }
9271
9272 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h
9273 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
9274 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
9275 @@ -11,6 +11,9 @@
9276 #include <asm/alternative.h>
9277 #include <asm/cpufeature.h>
9278 #include <asm/page.h>
9279 +#include <asm/pgtable.h>
9280 +
9281 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9282
9283 /*
9284 * Copy To/From Userspace
9285 @@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
9286 return ret;
9287 }
9288
9289 -__must_check unsigned long
9290 -_copy_to_user(void __user *to, const void *from, unsigned len);
9291 -__must_check unsigned long
9292 -_copy_from_user(void *to, const void __user *from, unsigned len);
9293 +static __always_inline __must_check unsigned long
9294 +__copy_to_user(void __user *to, const void *from, unsigned len);
9295 +static __always_inline __must_check unsigned long
9296 +__copy_from_user(void *to, const void __user *from, unsigned len);
9297 __must_check unsigned long
9298 copy_in_user(void __user *to, const void __user *from, unsigned len);
9299
9300 static inline unsigned long __must_check copy_from_user(void *to,
9301 const void __user *from,
9302 - unsigned long n)
9303 + unsigned n)
9304 {
9305 - int sz = __compiletime_object_size(to);
9306 -
9307 might_fault();
9308 - if (likely(sz == -1 || sz >= n))
9309 - n = _copy_from_user(to, from, n);
9310 -#ifdef CONFIG_DEBUG_VM
9311 - else
9312 - WARN(1, "Buffer overflow detected!\n");
9313 -#endif
9314 +
9315 + if (access_ok(VERIFY_READ, from, n))
9316 + n = __copy_from_user(to, from, n);
9317 + else if ((int)n > 0) {
9318 + if (!__builtin_constant_p(n))
9319 + check_object_size(to, n, false);
9320 + memset(to, 0, n);
9321 + }
9322 return n;
9323 }
9324
9325 @@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const
9326 {
9327 might_fault();
9328
9329 - return _copy_to_user(dst, src, size);
9330 + if (access_ok(VERIFY_WRITE, dst, size))
9331 + size = __copy_to_user(dst, src, size);
9332 + return size;
9333 }
9334
9335 static __always_inline __must_check
9336 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9337 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9338 {
9339 - int ret = 0;
9340 + int sz = __compiletime_object_size(dst);
9341 + unsigned ret = 0;
9342
9343 might_fault();
9344 - if (!__builtin_constant_p(size))
9345 - return copy_user_generic(dst, (__force void *)src, size);
9346 +
9347 + pax_track_stack();
9348 +
9349 + if ((int)size < 0)
9350 + return size;
9351 +
9352 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9353 + if (!__access_ok(VERIFY_READ, src, size))
9354 + return size;
9355 +#endif
9356 +
9357 + if (unlikely(sz != -1 && sz < size)) {
9358 +#ifdef CONFIG_DEBUG_VM
9359 + WARN(1, "Buffer overflow detected!\n");
9360 +#endif
9361 + return size;
9362 + }
9363 +
9364 + if (!__builtin_constant_p(size)) {
9365 + check_object_size(dst, size, false);
9366 +
9367 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9368 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9369 + src += PAX_USER_SHADOW_BASE;
9370 +#endif
9371 +
9372 + return copy_user_generic(dst, (__force const void *)src, size);
9373 + }
9374 switch (size) {
9375 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9376 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9377 ret, "b", "b", "=q", 1);
9378 return ret;
9379 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9380 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9381 ret, "w", "w", "=r", 2);
9382 return ret;
9383 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9384 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9385 ret, "l", "k", "=r", 4);
9386 return ret;
9387 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9388 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9389 ret, "q", "", "=r", 8);
9390 return ret;
9391 case 10:
9392 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9393 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9394 ret, "q", "", "=r", 10);
9395 if (unlikely(ret))
9396 return ret;
9397 __get_user_asm(*(u16 *)(8 + (char *)dst),
9398 - (u16 __user *)(8 + (char __user *)src),
9399 + (const u16 __user *)(8 + (const char __user *)src),
9400 ret, "w", "w", "=r", 2);
9401 return ret;
9402 case 16:
9403 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9404 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9405 ret, "q", "", "=r", 16);
9406 if (unlikely(ret))
9407 return ret;
9408 __get_user_asm(*(u64 *)(8 + (char *)dst),
9409 - (u64 __user *)(8 + (char __user *)src),
9410 + (const u64 __user *)(8 + (const char __user *)src),
9411 ret, "q", "", "=r", 8);
9412 return ret;
9413 default:
9414 - return copy_user_generic(dst, (__force void *)src, size);
9415 +
9416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9417 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9418 + src += PAX_USER_SHADOW_BASE;
9419 +#endif
9420 +
9421 + return copy_user_generic(dst, (__force const void *)src, size);
9422 }
9423 }
9424
9425 static __always_inline __must_check
9426 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9427 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9428 {
9429 - int ret = 0;
9430 + int sz = __compiletime_object_size(src);
9431 + unsigned ret = 0;
9432
9433 might_fault();
9434 - if (!__builtin_constant_p(size))
9435 +
9436 + pax_track_stack();
9437 +
9438 + if ((int)size < 0)
9439 + return size;
9440 +
9441 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9442 + if (!__access_ok(VERIFY_WRITE, dst, size))
9443 + return size;
9444 +#endif
9445 +
9446 + if (unlikely(sz != -1 && sz < size)) {
9447 +#ifdef CONFIG_DEBUG_VM
9448 + WARN(1, "Buffer overflow detected!\n");
9449 +#endif
9450 + return size;
9451 + }
9452 +
9453 + if (!__builtin_constant_p(size)) {
9454 + check_object_size(src, size, true);
9455 +
9456 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9457 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9458 + dst += PAX_USER_SHADOW_BASE;
9459 +#endif
9460 +
9461 return copy_user_generic((__force void *)dst, src, size);
9462 + }
9463 switch (size) {
9464 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9465 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9466 ret, "b", "b", "iq", 1);
9467 return ret;
9468 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9469 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9470 ret, "w", "w", "ir", 2);
9471 return ret;
9472 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9473 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9474 ret, "l", "k", "ir", 4);
9475 return ret;
9476 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9477 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9478 ret, "q", "", "er", 8);
9479 return ret;
9480 case 10:
9481 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9482 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9483 ret, "q", "", "er", 10);
9484 if (unlikely(ret))
9485 return ret;
9486 asm("":::"memory");
9487 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9488 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9489 ret, "w", "w", "ir", 2);
9490 return ret;
9491 case 16:
9492 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9493 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9494 ret, "q", "", "er", 16);
9495 if (unlikely(ret))
9496 return ret;
9497 asm("":::"memory");
9498 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9499 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9500 ret, "q", "", "er", 8);
9501 return ret;
9502 default:
9503 +
9504 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9505 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9506 + dst += PAX_USER_SHADOW_BASE;
9507 +#endif
9508 +
9509 return copy_user_generic((__force void *)dst, src, size);
9510 }
9511 }
9512
9513 static __always_inline __must_check
9514 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9515 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9516 {
9517 - int ret = 0;
9518 + unsigned ret = 0;
9519
9520 might_fault();
9521 - if (!__builtin_constant_p(size))
9522 +
9523 + if ((int)size < 0)
9524 + return size;
9525 +
9526 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9527 + if (!__access_ok(VERIFY_READ, src, size))
9528 + return size;
9529 + if (!__access_ok(VERIFY_WRITE, dst, size))
9530 + return size;
9531 +#endif
9532 +
9533 + if (!__builtin_constant_p(size)) {
9534 +
9535 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9536 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9537 + src += PAX_USER_SHADOW_BASE;
9538 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9539 + dst += PAX_USER_SHADOW_BASE;
9540 +#endif
9541 +
9542 return copy_user_generic((__force void *)dst,
9543 - (__force void *)src, size);
9544 + (__force const void *)src, size);
9545 + }
9546 switch (size) {
9547 case 1: {
9548 u8 tmp;
9549 - __get_user_asm(tmp, (u8 __user *)src,
9550 + __get_user_asm(tmp, (const u8 __user *)src,
9551 ret, "b", "b", "=q", 1);
9552 if (likely(!ret))
9553 __put_user_asm(tmp, (u8 __user *)dst,
9554 @@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con
9555 }
9556 case 2: {
9557 u16 tmp;
9558 - __get_user_asm(tmp, (u16 __user *)src,
9559 + __get_user_asm(tmp, (const u16 __user *)src,
9560 ret, "w", "w", "=r", 2);
9561 if (likely(!ret))
9562 __put_user_asm(tmp, (u16 __user *)dst,
9563 @@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con
9564
9565 case 4: {
9566 u32 tmp;
9567 - __get_user_asm(tmp, (u32 __user *)src,
9568 + __get_user_asm(tmp, (const u32 __user *)src,
9569 ret, "l", "k", "=r", 4);
9570 if (likely(!ret))
9571 __put_user_asm(tmp, (u32 __user *)dst,
9572 @@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con
9573 }
9574 case 8: {
9575 u64 tmp;
9576 - __get_user_asm(tmp, (u64 __user *)src,
9577 + __get_user_asm(tmp, (const u64 __user *)src,
9578 ret, "q", "", "=r", 8);
9579 if (likely(!ret))
9580 __put_user_asm(tmp, (u64 __user *)dst,
9581 @@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con
9582 return ret;
9583 }
9584 default:
9585 +
9586 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9587 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9588 + src += PAX_USER_SHADOW_BASE;
9589 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9590 + dst += PAX_USER_SHADOW_BASE;
9591 +#endif
9592 +
9593 return copy_user_generic((__force void *)dst,
9594 - (__force void *)src, size);
9595 + (__force const void *)src, size);
9596 }
9597 }
9598
9599 @@ -222,33 +321,72 @@ __must_check unsigned long __clear_user(
9600 static __must_check __always_inline int
9601 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9602 {
9603 + pax_track_stack();
9604 +
9605 + if ((int)size < 0)
9606 + return size;
9607 +
9608 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9609 + if (!__access_ok(VERIFY_READ, src, size))
9610 + return size;
9611 +
9612 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9613 + src += PAX_USER_SHADOW_BASE;
9614 +#endif
9615 +
9616 return copy_user_generic(dst, (__force const void *)src, size);
9617 }
9618
9619 -static __must_check __always_inline int
9620 +static __must_check __always_inline unsigned long
9621 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9622 {
9623 + if ((int)size < 0)
9624 + return size;
9625 +
9626 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9627 + if (!__access_ok(VERIFY_WRITE, dst, size))
9628 + return size;
9629 +
9630 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9631 + dst += PAX_USER_SHADOW_BASE;
9632 +#endif
9633 +
9634 return copy_user_generic((__force void *)dst, src, size);
9635 }
9636
9637 -extern long __copy_user_nocache(void *dst, const void __user *src,
9638 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9639 unsigned size, int zerorest);
9640
9641 -static inline int
9642 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9643 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9644 {
9645 might_sleep();
9646 +
9647 + if ((int)size < 0)
9648 + return size;
9649 +
9650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9651 + if (!__access_ok(VERIFY_READ, src, size))
9652 + return size;
9653 +#endif
9654 +
9655 return __copy_user_nocache(dst, src, size, 1);
9656 }
9657
9658 -static inline int
9659 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9660 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9661 unsigned size)
9662 {
9663 + if ((int)size < 0)
9664 + return size;
9665 +
9666 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9667 + if (!__access_ok(VERIFY_READ, src, size))
9668 + return size;
9669 +#endif
9670 +
9671 return __copy_user_nocache(dst, src, size, 0);
9672 }
9673
9674 -unsigned long
9675 +extern unsigned long
9676 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9677
9678 #endif /* _ASM_X86_UACCESS_64_H */
9679 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess.h linux-2.6.39.4/arch/x86/include/asm/uaccess.h
9680 --- linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400
9681 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
9682 @@ -8,12 +8,15 @@
9683 #include <linux/thread_info.h>
9684 #include <linux/prefetch.h>
9685 #include <linux/string.h>
9686 +#include <linux/sched.h>
9687 #include <asm/asm.h>
9688 #include <asm/page.h>
9689
9690 #define VERIFY_READ 0
9691 #define VERIFY_WRITE 1
9692
9693 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9694 +
9695 /*
9696 * The fs value determines whether argument validity checking should be
9697 * performed or not. If get_fs() == USER_DS, checking is performed, with
9698 @@ -29,7 +32,12 @@
9699
9700 #define get_ds() (KERNEL_DS)
9701 #define get_fs() (current_thread_info()->addr_limit)
9702 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9703 +void __set_fs(mm_segment_t x);
9704 +void set_fs(mm_segment_t x);
9705 +#else
9706 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9707 +#endif
9708
9709 #define segment_eq(a, b) ((a).seg == (b).seg)
9710
9711 @@ -77,7 +85,33 @@
9712 * checks that the pointer is in the user space range - after calling
9713 * this function, memory access functions may still return -EFAULT.
9714 */
9715 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9716 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9717 +#define access_ok(type, addr, size) \
9718 +({ \
9719 + long __size = size; \
9720 + unsigned long __addr = (unsigned long)addr; \
9721 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9722 + unsigned long __end_ao = __addr + __size - 1; \
9723 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9724 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9725 + while(__addr_ao <= __end_ao) { \
9726 + char __c_ao; \
9727 + __addr_ao += PAGE_SIZE; \
9728 + if (__size > PAGE_SIZE) \
9729 + cond_resched(); \
9730 + if (__get_user(__c_ao, (char __user *)__addr)) \
9731 + break; \
9732 + if (type != VERIFY_WRITE) { \
9733 + __addr = __addr_ao; \
9734 + continue; \
9735 + } \
9736 + if (__put_user(__c_ao, (char __user *)__addr)) \
9737 + break; \
9738 + __addr = __addr_ao; \
9739 + } \
9740 + } \
9741 + __ret_ao; \
9742 +})
9743
9744 /*
9745 * The exception table consists of pairs of addresses: the first is the
9746 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
9747 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9748 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9749
9750 -
9751 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9752 +#define __copyuser_seg "gs;"
9753 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9754 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9755 +#else
9756 +#define __copyuser_seg
9757 +#define __COPYUSER_SET_ES
9758 +#define __COPYUSER_RESTORE_ES
9759 +#endif
9760
9761 #ifdef CONFIG_X86_32
9762 #define __put_user_asm_u64(x, addr, err, errret) \
9763 - asm volatile("1: movl %%eax,0(%2)\n" \
9764 - "2: movl %%edx,4(%2)\n" \
9765 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9766 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9767 "3:\n" \
9768 ".section .fixup,\"ax\"\n" \
9769 "4: movl %3,%0\n" \
9770 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
9771 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9772
9773 #define __put_user_asm_ex_u64(x, addr) \
9774 - asm volatile("1: movl %%eax,0(%1)\n" \
9775 - "2: movl %%edx,4(%1)\n" \
9776 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9777 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9778 "3:\n" \
9779 _ASM_EXTABLE(1b, 2b - 1b) \
9780 _ASM_EXTABLE(2b, 3b - 2b) \
9781 @@ -374,7 +416,7 @@ do { \
9782 } while (0)
9783
9784 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9785 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9786 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9787 "2:\n" \
9788 ".section .fixup,\"ax\"\n" \
9789 "3: mov %3,%0\n" \
9790 @@ -382,7 +424,7 @@ do { \
9791 " jmp 2b\n" \
9792 ".previous\n" \
9793 _ASM_EXTABLE(1b, 3b) \
9794 - : "=r" (err), ltype(x) \
9795 + : "=r" (err), ltype (x) \
9796 : "m" (__m(addr)), "i" (errret), "0" (err))
9797
9798 #define __get_user_size_ex(x, ptr, size) \
9799 @@ -407,7 +449,7 @@ do { \
9800 } while (0)
9801
9802 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9803 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9804 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9805 "2:\n" \
9806 _ASM_EXTABLE(1b, 2b - 1b) \
9807 : ltype(x) : "m" (__m(addr)))
9808 @@ -424,13 +466,24 @@ do { \
9809 int __gu_err; \
9810 unsigned long __gu_val; \
9811 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9812 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9813 + (x) = (__typeof__(*(ptr)))__gu_val; \
9814 __gu_err; \
9815 })
9816
9817 /* FIXME: this hack is definitely wrong -AK */
9818 struct __large_struct { unsigned long buf[100]; };
9819 -#define __m(x) (*(struct __large_struct __user *)(x))
9820 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9821 +#define ____m(x) \
9822 +({ \
9823 + unsigned long ____x = (unsigned long)(x); \
9824 + if (____x < PAX_USER_SHADOW_BASE) \
9825 + ____x += PAX_USER_SHADOW_BASE; \
9826 + (void __user *)____x; \
9827 +})
9828 +#else
9829 +#define ____m(x) (x)
9830 +#endif
9831 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9832
9833 /*
9834 * Tell gcc we read from memory instead of writing: this is because
9835 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
9836 * aliasing issues.
9837 */
9838 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9839 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9840 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9841 "2:\n" \
9842 ".section .fixup,\"ax\"\n" \
9843 "3: mov %3,%0\n" \
9844 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
9845 ".previous\n" \
9846 _ASM_EXTABLE(1b, 3b) \
9847 : "=r"(err) \
9848 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9849 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9850
9851 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9852 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9853 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9854 "2:\n" \
9855 _ASM_EXTABLE(1b, 2b - 1b) \
9856 : : ltype(x), "m" (__m(addr)))
9857 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
9858 * On error, the variable @x is set to zero.
9859 */
9860
9861 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9862 +#define __get_user(x, ptr) get_user((x), (ptr))
9863 +#else
9864 #define __get_user(x, ptr) \
9865 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9866 +#endif
9867
9868 /**
9869 * __put_user: - Write a simple value into user space, with less checking.
9870 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
9871 * Returns zero on success, or -EFAULT on error.
9872 */
9873
9874 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9875 +#define __put_user(x, ptr) put_user((x), (ptr))
9876 +#else
9877 #define __put_user(x, ptr) \
9878 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9879 +#endif
9880
9881 #define __get_user_unaligned __get_user
9882 #define __put_user_unaligned __put_user
9883 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
9884 #define get_user_ex(x, ptr) do { \
9885 unsigned long __gue_val; \
9886 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9887 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9888 + (x) = (__typeof__(*(ptr)))__gue_val; \
9889 } while (0)
9890
9891 #ifdef CONFIG_X86_WP_WORKS_OK
9892 @@ -567,6 +628,7 @@ extern struct movsl_mask {
9893
9894 #define ARCH_HAS_NOCACHE_UACCESS 1
9895
9896 +#define ARCH_HAS_SORT_EXTABLE
9897 #ifdef CONFIG_X86_32
9898 # include "uaccess_32.h"
9899 #else
9900 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vgtod.h linux-2.6.39.4/arch/x86/include/asm/vgtod.h
9901 --- linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400
9902 +++ linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-08-05 19:44:33.000000000 -0400
9903 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9904 int sysctl_enabled;
9905 struct timezone sys_tz;
9906 struct { /* extract of a clocksource struct */
9907 + char name[8];
9908 cycle_t (*vread)(void);
9909 cycle_t cycle_last;
9910 cycle_t mask;
9911 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vsyscall.h linux-2.6.39.4/arch/x86/include/asm/vsyscall.h
9912 --- linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400
9913 +++ linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-08-05 19:44:33.000000000 -0400
9914 @@ -15,9 +15,10 @@ enum vsyscall_num {
9915
9916 #ifdef __KERNEL__
9917 #include <linux/seqlock.h>
9918 +#include <linux/getcpu.h>
9919 +#include <linux/time.h>
9920
9921 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9922 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9923
9924 /* Definitions for CONFIG_GENERIC_TIME definitions */
9925 #define __section_vsyscall_gtod_data __attribute__ \
9926 @@ -31,7 +32,6 @@ enum vsyscall_num {
9927 #define VGETCPU_LSL 2
9928
9929 extern int __vgetcpu_mode;
9930 -extern volatile unsigned long __jiffies;
9931
9932 /* kernel space (writeable) */
9933 extern int vgetcpu_mode;
9934 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9935
9936 extern void map_vsyscall(void);
9937
9938 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9939 +extern time_t vtime(time_t *t);
9940 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9941 #endif /* __KERNEL__ */
9942
9943 #endif /* _ASM_X86_VSYSCALL_H */
9944 diff -urNp linux-2.6.39.4/arch/x86/include/asm/x86_init.h linux-2.6.39.4/arch/x86/include/asm/x86_init.h
9945 --- linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-05-19 00:06:34.000000000 -0400
9946 +++ linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-08-05 20:34:06.000000000 -0400
9947 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9948 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9949 void (*find_smp_config)(void);
9950 void (*get_smp_config)(unsigned int early);
9951 -};
9952 +} __no_const;
9953
9954 /**
9955 * struct x86_init_resources - platform specific resource related ops
9956 @@ -42,7 +42,7 @@ struct x86_init_resources {
9957 void (*probe_roms)(void);
9958 void (*reserve_resources)(void);
9959 char *(*memory_setup)(void);
9960 -};
9961 +} __no_const;
9962
9963 /**
9964 * struct x86_init_irqs - platform specific interrupt setup
9965 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9966 void (*pre_vector_init)(void);
9967 void (*intr_init)(void);
9968 void (*trap_init)(void);
9969 -};
9970 +} __no_const;
9971
9972 /**
9973 * struct x86_init_oem - oem platform specific customizing functions
9974 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9975 struct x86_init_oem {
9976 void (*arch_setup)(void);
9977 void (*banner)(void);
9978 -};
9979 +} __no_const;
9980
9981 /**
9982 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9983 @@ -76,7 +76,7 @@ struct x86_init_oem {
9984 */
9985 struct x86_init_mapping {
9986 void (*pagetable_reserve)(u64 start, u64 end);
9987 -};
9988 +} __no_const;
9989
9990 /**
9991 * struct x86_init_paging - platform specific paging functions
9992 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9993 struct x86_init_paging {
9994 void (*pagetable_setup_start)(pgd_t *base);
9995 void (*pagetable_setup_done)(pgd_t *base);
9996 -};
9997 +} __no_const;
9998
9999 /**
10000 * struct x86_init_timers - platform specific timer setup
10001 @@ -101,7 +101,7 @@ struct x86_init_timers {
10002 void (*tsc_pre_init)(void);
10003 void (*timer_init)(void);
10004 void (*wallclock_init)(void);
10005 -};
10006 +} __no_const;
10007
10008 /**
10009 * struct x86_init_iommu - platform specific iommu setup
10010 @@ -109,7 +109,7 @@ struct x86_init_timers {
10011 */
10012 struct x86_init_iommu {
10013 int (*iommu_init)(void);
10014 -};
10015 +} __no_const;
10016
10017 /**
10018 * struct x86_init_pci - platform specific pci init functions
10019 @@ -123,7 +123,7 @@ struct x86_init_pci {
10020 int (*init)(void);
10021 void (*init_irq)(void);
10022 void (*fixup_irqs)(void);
10023 -};
10024 +} __no_const;
10025
10026 /**
10027 * struct x86_init_ops - functions for platform specific setup
10028 @@ -139,7 +139,7 @@ struct x86_init_ops {
10029 struct x86_init_timers timers;
10030 struct x86_init_iommu iommu;
10031 struct x86_init_pci pci;
10032 -};
10033 +} __no_const;
10034
10035 /**
10036 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10037 @@ -147,7 +147,7 @@ struct x86_init_ops {
10038 */
10039 struct x86_cpuinit_ops {
10040 void (*setup_percpu_clockev)(void);
10041 -};
10042 +} __no_const;
10043
10044 /**
10045 * struct x86_platform_ops - platform specific runtime functions
10046 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10047 bool (*is_untracked_pat_range)(u64 start, u64 end);
10048 void (*nmi_init)(void);
10049 int (*i8042_detect)(void);
10050 -};
10051 +} __no_const;
10052
10053 struct pci_dev;
10054
10055 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10056 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10057 void (*teardown_msi_irq)(unsigned int irq);
10058 void (*teardown_msi_irqs)(struct pci_dev *dev);
10059 -};
10060 +} __no_const;
10061
10062 extern struct x86_init_ops x86_init;
10063 extern struct x86_cpuinit_ops x86_cpuinit;
10064 diff -urNp linux-2.6.39.4/arch/x86/include/asm/xsave.h linux-2.6.39.4/arch/x86/include/asm/xsave.h
10065 --- linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400
10066 +++ linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-08-05 19:44:33.000000000 -0400
10067 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10068 {
10069 int err;
10070
10071 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10072 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10073 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10074 +#endif
10075 +
10076 /*
10077 * Clear the xsave header first, so that reserved fields are
10078 * initialized to zero.
10079 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10080 u32 lmask = mask;
10081 u32 hmask = mask >> 32;
10082
10083 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10084 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10085 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10086 +#endif
10087 +
10088 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10089 "2:\n"
10090 ".section .fixup,\"ax\"\n"
10091 diff -urNp linux-2.6.39.4/arch/x86/Kconfig linux-2.6.39.4/arch/x86/Kconfig
10092 --- linux-2.6.39.4/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400
10093 +++ linux-2.6.39.4/arch/x86/Kconfig 2011-08-05 19:44:33.000000000 -0400
10094 @@ -224,7 +224,7 @@ config X86_HT
10095
10096 config X86_32_LAZY_GS
10097 def_bool y
10098 - depends on X86_32 && !CC_STACKPROTECTOR
10099 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10100
10101 config ARCH_HWEIGHT_CFLAGS
10102 string
10103 @@ -1022,7 +1022,7 @@ choice
10104
10105 config NOHIGHMEM
10106 bool "off"
10107 - depends on !X86_NUMAQ
10108 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10109 ---help---
10110 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10111 However, the address space of 32-bit x86 processors is only 4
10112 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
10113
10114 config HIGHMEM4G
10115 bool "4GB"
10116 - depends on !X86_NUMAQ
10117 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10118 ---help---
10119 Select this if you have a 32-bit processor and between 1 and 4
10120 gigabytes of physical RAM.
10121 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
10122 hex
10123 default 0xB0000000 if VMSPLIT_3G_OPT
10124 default 0x80000000 if VMSPLIT_2G
10125 - default 0x78000000 if VMSPLIT_2G_OPT
10126 + default 0x70000000 if VMSPLIT_2G_OPT
10127 default 0x40000000 if VMSPLIT_1G
10128 default 0xC0000000
10129 depends on X86_32
10130 @@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
10131
10132 config EFI
10133 bool "EFI runtime service support"
10134 - depends on ACPI
10135 + depends on ACPI && !PAX_KERNEXEC
10136 ---help---
10137 This enables the kernel to use EFI runtime services that are
10138 available (such as the EFI variable services).
10139 @@ -1487,6 +1487,7 @@ config SECCOMP
10140
10141 config CC_STACKPROTECTOR
10142 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10143 + depends on X86_64 || !PAX_MEMORY_UDEREF
10144 ---help---
10145 This option turns on the -fstack-protector GCC feature. This
10146 feature puts, at the beginning of functions, a canary value on
10147 @@ -1544,6 +1545,7 @@ config KEXEC_JUMP
10148 config PHYSICAL_START
10149 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10150 default "0x1000000"
10151 + range 0x400000 0x40000000
10152 ---help---
10153 This gives the physical address where the kernel is loaded.
10154
10155 @@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS
10156 config PHYSICAL_ALIGN
10157 hex "Alignment value to which kernel should be aligned" if X86_32
10158 default "0x1000000"
10159 + range 0x400000 0x1000000 if PAX_KERNEXEC
10160 range 0x2000 0x1000000
10161 ---help---
10162 This value puts the alignment restrictions on physical address
10163 @@ -1638,9 +1641,10 @@ config HOTPLUG_CPU
10164 Say N if you want to disable CPU hotplug.
10165
10166 config COMPAT_VDSO
10167 - def_bool y
10168 + def_bool n
10169 prompt "Compat VDSO support"
10170 depends on X86_32 || IA32_EMULATION
10171 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10172 ---help---
10173 Map the 32-bit VDSO to the predictable old-style address too.
10174
10175 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.cpu linux-2.6.39.4/arch/x86/Kconfig.cpu
10176 --- linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400
10177 +++ linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-08-05 19:44:33.000000000 -0400
10178 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
10179
10180 config X86_F00F_BUG
10181 def_bool y
10182 - depends on M586MMX || M586TSC || M586 || M486 || M386
10183 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10184
10185 config X86_INVD_BUG
10186 def_bool y
10187 @@ -358,7 +358,7 @@ config X86_POPAD_OK
10188
10189 config X86_ALIGNMENT_16
10190 def_bool y
10191 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10192 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10193
10194 config X86_INTEL_USERCOPY
10195 def_bool y
10196 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
10197 # generates cmov.
10198 config X86_CMOV
10199 def_bool y
10200 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10201 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10202
10203 config X86_MINIMUM_CPU_FAMILY
10204 int
10205 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.debug linux-2.6.39.4/arch/x86/Kconfig.debug
10206 --- linux-2.6.39.4/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
10207 +++ linux-2.6.39.4/arch/x86/Kconfig.debug 2011-08-05 19:44:33.000000000 -0400
10208 @@ -101,7 +101,7 @@ config X86_PTDUMP
10209 config DEBUG_RODATA
10210 bool "Write protect kernel read-only data structures"
10211 default y
10212 - depends on DEBUG_KERNEL
10213 + depends on DEBUG_KERNEL && BROKEN
10214 ---help---
10215 Mark the kernel read-only data as write-protected in the pagetables,
10216 in order to catch accidental (and incorrect) writes to such const
10217 @@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST
10218
10219 config DEBUG_SET_MODULE_RONX
10220 bool "Set loadable kernel module data as NX and text as RO"
10221 - depends on MODULES
10222 + depends on MODULES && BROKEN
10223 ---help---
10224 This option helps catch unintended modifications to loadable
10225 kernel module's text and read-only data. It also prevents execution
10226 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile
10227 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-05-19 00:06:34.000000000 -0400
10228 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-05 20:34:06.000000000 -0400
10229 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10230 $(call cc-option, -fno-stack-protector) \
10231 $(call cc-option, -mpreferred-stack-boundary=2)
10232 KBUILD_CFLAGS += $(call cc-option, -m32)
10233 +ifdef CONSTIFY_PLUGIN
10234 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10235 +endif
10236 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10237 GCOV_PROFILE := n
10238
10239 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S
10240 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:18:51.000000000 -0400
10241 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-05 19:44:33.000000000 -0400
10242 @@ -108,6 +108,9 @@ wakeup_code:
10243 /* Do any other stuff... */
10244
10245 #ifndef CONFIG_64BIT
10246 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10247 + call verify_cpu
10248 +
10249 /* This could also be done in C code... */
10250 movl pmode_cr3, %eax
10251 movl %eax, %cr3
10252 @@ -131,6 +134,7 @@ wakeup_code:
10253 movl pmode_cr0, %eax
10254 movl %eax, %cr0
10255 jmp pmode_return
10256 +# include "../../verify_cpu.S"
10257 #else
10258 pushw $0
10259 pushw trampoline_segment
10260 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c
10261 --- linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:18:51.000000000 -0400
10262 +++ linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-08-05 19:44:33.000000000 -0400
10263 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10264 header->trampoline_segment = trampoline_address() >> 4;
10265 #ifdef CONFIG_SMP
10266 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10267 +
10268 + pax_open_kernel();
10269 early_gdt_descr.address =
10270 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10271 + pax_close_kernel();
10272 +
10273 initial_gs = per_cpu_offset(smp_processor_id());
10274 #endif
10275 initial_code = (unsigned long)wakeup_long64;
10276 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S
10277 --- linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400
10278 +++ linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-05 19:44:33.000000000 -0400
10279 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10280 # and restore the stack ... but you need gdt for this to work
10281 movl saved_context_esp, %esp
10282
10283 - movl %cs:saved_magic, %eax
10284 - cmpl $0x12345678, %eax
10285 + cmpl $0x12345678, saved_magic
10286 jne bogus_magic
10287
10288 # jump to place where we left off
10289 - movl saved_eip, %eax
10290 - jmp *%eax
10291 + jmp *(saved_eip)
10292
10293 bogus_magic:
10294 jmp bogus_magic
10295 diff -urNp linux-2.6.39.4/arch/x86/kernel/alternative.c linux-2.6.39.4/arch/x86/kernel/alternative.c
10296 --- linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400
10297 +++ linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-08-05 19:44:33.000000000 -0400
10298 @@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
10299 if (!*poff || ptr < text || ptr >= text_end)
10300 continue;
10301 /* turn DS segment override prefix into lock prefix */
10302 - if (*ptr == 0x3e)
10303 + if (*ktla_ktva(ptr) == 0x3e)
10304 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10305 };
10306 mutex_unlock(&text_mutex);
10307 @@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
10308 if (!*poff || ptr < text || ptr >= text_end)
10309 continue;
10310 /* turn lock prefix into DS segment override prefix */
10311 - if (*ptr == 0xf0)
10312 + if (*ktla_ktva(ptr) == 0xf0)
10313 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10314 };
10315 mutex_unlock(&text_mutex);
10316 @@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str
10317
10318 BUG_ON(p->len > MAX_PATCH_LEN);
10319 /* prep the buffer with the original instructions */
10320 - memcpy(insnbuf, p->instr, p->len);
10321 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10322 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10323 (unsigned long)p->instr, p->len);
10324
10325 @@ -506,7 +506,7 @@ void __init alternative_instructions(voi
10326 if (smp_alt_once)
10327 free_init_pages("SMP alternatives",
10328 (unsigned long)__smp_locks,
10329 - (unsigned long)__smp_locks_end);
10330 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10331
10332 restart_nmi();
10333 }
10334 @@ -523,13 +523,17 @@ void __init alternative_instructions(voi
10335 * instructions. And on the local CPU you need to be protected again NMI or MCE
10336 * handlers seeing an inconsistent instruction while you patch.
10337 */
10338 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10339 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10340 size_t len)
10341 {
10342 unsigned long flags;
10343 local_irq_save(flags);
10344 - memcpy(addr, opcode, len);
10345 +
10346 + pax_open_kernel();
10347 + memcpy(ktla_ktva(addr), opcode, len);
10348 sync_core();
10349 + pax_close_kernel();
10350 +
10351 local_irq_restore(flags);
10352 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10353 that causes hangs on some VIA CPUs. */
10354 @@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v
10355 */
10356 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10357 {
10358 - unsigned long flags;
10359 - char *vaddr;
10360 + unsigned char *vaddr = ktla_ktva(addr);
10361 struct page *pages[2];
10362 - int i;
10363 + size_t i;
10364
10365 if (!core_kernel_text((unsigned long)addr)) {
10366 - pages[0] = vmalloc_to_page(addr);
10367 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10368 + pages[0] = vmalloc_to_page(vaddr);
10369 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10370 } else {
10371 - pages[0] = virt_to_page(addr);
10372 + pages[0] = virt_to_page(vaddr);
10373 WARN_ON(!PageReserved(pages[0]));
10374 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10375 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10376 }
10377 BUG_ON(!pages[0]);
10378 - local_irq_save(flags);
10379 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10380 - if (pages[1])
10381 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10382 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10383 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10384 - clear_fixmap(FIX_TEXT_POKE0);
10385 - if (pages[1])
10386 - clear_fixmap(FIX_TEXT_POKE1);
10387 - local_flush_tlb();
10388 - sync_core();
10389 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10390 - that causes hangs on some VIA CPUs. */
10391 + text_poke_early(addr, opcode, len);
10392 for (i = 0; i < len; i++)
10393 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10394 - local_irq_restore(flags);
10395 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10396 return addr;
10397 }
10398
10399 @@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc
10400 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
10401
10402 #ifdef CONFIG_X86_64
10403 -unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10404 +unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10405 #else
10406 -unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10407 +unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10408 #endif
10409
10410 void __init arch_init_ideal_nop5(void)
10411 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/apic.c linux-2.6.39.4/arch/x86/kernel/apic/apic.c
10412 --- linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400
10413 +++ linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-08-17 20:01:50.000000000 -0400
10414 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10415 /*
10416 * Debug level, exported for io_apic.c
10417 */
10418 -unsigned int apic_verbosity;
10419 +int apic_verbosity;
10420
10421 int pic_mode;
10422
10423 @@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs
10424 apic_write(APIC_ESR, 0);
10425 v1 = apic_read(APIC_ESR);
10426 ack_APIC_irq();
10427 - atomic_inc(&irq_err_count);
10428 + atomic_inc_unchecked(&irq_err_count);
10429
10430 /*
10431 * Here is what the APIC error bits mean:
10432 @@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo
10433 u16 *bios_cpu_apicid;
10434 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10435
10436 + pax_track_stack();
10437 +
10438 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10439 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10440
10441 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c
10442 --- linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400
10443 +++ linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-08-05 19:44:33.000000000 -0400
10444 @@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi
10445 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
10446 GFP_ATOMIC);
10447 if (!ioapic_entries)
10448 - return 0;
10449 + return NULL;
10450
10451 for (apic = 0; apic < nr_ioapics; apic++) {
10452 ioapic_entries[apic] =
10453 @@ -640,7 +640,7 @@ nomem:
10454 kfree(ioapic_entries[apic]);
10455 kfree(ioapic_entries);
10456
10457 - return 0;
10458 + return NULL;
10459 }
10460
10461 /*
10462 @@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10463 }
10464 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10465
10466 -void lock_vector_lock(void)
10467 +void lock_vector_lock(void) __acquires(vector_lock)
10468 {
10469 /* Used to the online set of cpus does not change
10470 * during assign_irq_vector.
10471 @@ -1048,7 +1048,7 @@ void lock_vector_lock(void)
10472 raw_spin_lock(&vector_lock);
10473 }
10474
10475 -void unlock_vector_lock(void)
10476 +void unlock_vector_lock(void) __releases(vector_lock)
10477 {
10478 raw_spin_unlock(&vector_lock);
10479 }
10480 @@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat
10481 ack_APIC_irq();
10482 }
10483
10484 -atomic_t irq_mis_count;
10485 +atomic_unchecked_t irq_mis_count;
10486
10487 /*
10488 * IO-APIC versions below 0x20 don't support EOI register.
10489 @@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da
10490 * at the cpu.
10491 */
10492 if (!(v & (1 << (i & 0x1f)))) {
10493 - atomic_inc(&irq_mis_count);
10494 + atomic_inc_unchecked(&irq_mis_count);
10495
10496 eoi_ioapic_irq(irq, cfg);
10497 }
10498 diff -urNp linux-2.6.39.4/arch/x86/kernel/apm_32.c linux-2.6.39.4/arch/x86/kernel/apm_32.c
10499 --- linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400
10500 +++ linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-08-05 19:44:33.000000000 -0400
10501 @@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
10502 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10503 * even though they are called in protected mode.
10504 */
10505 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10506 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10507 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10508
10509 static const char driver_version[] = "1.16ac"; /* no spaces */
10510 @@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
10511 BUG_ON(cpu != 0);
10512 gdt = get_cpu_gdt_table(cpu);
10513 save_desc_40 = gdt[0x40 / 8];
10514 +
10515 + pax_open_kernel();
10516 gdt[0x40 / 8] = bad_bios_desc;
10517 + pax_close_kernel();
10518
10519 apm_irq_save(flags);
10520 APM_DO_SAVE_SEGS;
10521 @@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
10522 &call->esi);
10523 APM_DO_RESTORE_SEGS;
10524 apm_irq_restore(flags);
10525 +
10526 + pax_open_kernel();
10527 gdt[0x40 / 8] = save_desc_40;
10528 + pax_close_kernel();
10529 +
10530 put_cpu();
10531
10532 return call->eax & 0xff;
10533 @@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void
10534 BUG_ON(cpu != 0);
10535 gdt = get_cpu_gdt_table(cpu);
10536 save_desc_40 = gdt[0x40 / 8];
10537 +
10538 + pax_open_kernel();
10539 gdt[0x40 / 8] = bad_bios_desc;
10540 + pax_close_kernel();
10541
10542 apm_irq_save(flags);
10543 APM_DO_SAVE_SEGS;
10544 @@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void
10545 &call->eax);
10546 APM_DO_RESTORE_SEGS;
10547 apm_irq_restore(flags);
10548 +
10549 + pax_open_kernel();
10550 gdt[0x40 / 8] = save_desc_40;
10551 + pax_close_kernel();
10552 +
10553 put_cpu();
10554 return error;
10555 }
10556 @@ -2351,12 +2365,15 @@ static int __init apm_init(void)
10557 * code to that CPU.
10558 */
10559 gdt = get_cpu_gdt_table(0);
10560 +
10561 + pax_open_kernel();
10562 set_desc_base(&gdt[APM_CS >> 3],
10563 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10564 set_desc_base(&gdt[APM_CS_16 >> 3],
10565 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10566 set_desc_base(&gdt[APM_DS >> 3],
10567 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10568 + pax_close_kernel();
10569
10570 proc_create("apm", 0, NULL, &apm_file_ops);
10571
10572 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c
10573 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400
10574 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-08-05 19:44:33.000000000 -0400
10575 @@ -69,6 +69,7 @@ int main(void)
10576 BLANK();
10577 #undef ENTRY
10578
10579 + DEFINE(TSS_size, sizeof(struct tss_struct));
10580 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10581 BLANK();
10582
10583 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets.c linux-2.6.39.4/arch/x86/kernel/asm-offsets.c
10584 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400
10585 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-08-05 19:44:33.000000000 -0400
10586 @@ -33,6 +33,8 @@ void common(void) {
10587 OFFSET(TI_status, thread_info, status);
10588 OFFSET(TI_addr_limit, thread_info, addr_limit);
10589 OFFSET(TI_preempt_count, thread_info, preempt_count);
10590 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10591 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10592
10593 BLANK();
10594 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10595 @@ -53,8 +55,26 @@ void common(void) {
10596 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10597 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10598 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10599 +
10600 +#ifdef CONFIG_PAX_KERNEXEC
10601 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10602 +#endif
10603 +
10604 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10605 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10606 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10607 +#ifdef CONFIG_X86_64
10608 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
10609 +#endif
10610 #endif
10611
10612 +#endif
10613 +
10614 + BLANK();
10615 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10616 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10617 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10618 +
10619 #ifdef CONFIG_XEN
10620 BLANK();
10621 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10622 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/amd.c linux-2.6.39.4/arch/x86/kernel/cpu/amd.c
10623 --- linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400
10624 +++ linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-08-05 19:44:33.000000000 -0400
10625 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10626 unsigned int size)
10627 {
10628 /* AMD errata T13 (order #21922) */
10629 - if ((c->x86 == 6)) {
10630 + if (c->x86 == 6) {
10631 /* Duron Rev A0 */
10632 if (c->x86_model == 3 && c->x86_mask == 0)
10633 size = 64;
10634 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/common.c linux-2.6.39.4/arch/x86/kernel/cpu/common.c
10635 --- linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400
10636 +++ linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-08-05 19:44:33.000000000 -0400
10637 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10638
10639 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10640
10641 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10642 -#ifdef CONFIG_X86_64
10643 - /*
10644 - * We need valid kernel segments for data and code in long mode too
10645 - * IRET will check the segment types kkeil 2000/10/28
10646 - * Also sysret mandates a special GDT layout
10647 - *
10648 - * TLS descriptors are currently at a different place compared to i386.
10649 - * Hopefully nobody expects them at a fixed place (Wine?)
10650 - */
10651 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10652 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10653 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10654 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10655 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10656 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10657 -#else
10658 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10659 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10660 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10661 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10662 - /*
10663 - * Segments used for calling PnP BIOS have byte granularity.
10664 - * They code segments and data segments have fixed 64k limits,
10665 - * the transfer segment sizes are set at run time.
10666 - */
10667 - /* 32-bit code */
10668 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10669 - /* 16-bit code */
10670 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10671 - /* 16-bit data */
10672 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10673 - /* 16-bit data */
10674 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10675 - /* 16-bit data */
10676 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10677 - /*
10678 - * The APM segments have byte granularity and their bases
10679 - * are set at run time. All have 64k limits.
10680 - */
10681 - /* 32-bit code */
10682 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10683 - /* 16-bit code */
10684 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10685 - /* data */
10686 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10687 -
10688 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10689 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10690 - GDT_STACK_CANARY_INIT
10691 -#endif
10692 -} };
10693 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10694 -
10695 static int __init x86_xsave_setup(char *s)
10696 {
10697 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10698 @@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
10699 {
10700 struct desc_ptr gdt_descr;
10701
10702 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10703 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10704 gdt_descr.size = GDT_SIZE - 1;
10705 load_gdt(&gdt_descr);
10706 /* Reload the per-cpu base */
10707 @@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc
10708 /* Filter out anything that depends on CPUID levels we don't have */
10709 filter_cpuid_features(c, true);
10710
10711 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10712 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10713 +#endif
10714 +
10715 /* If the model name is still unset, do table lookup. */
10716 if (!c->x86_model_id[0]) {
10717 const char *p;
10718 @@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha
10719 }
10720 __setup("clearcpuid=", setup_disablecpuid);
10721
10722 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10723 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10724 +
10725 #ifdef CONFIG_X86_64
10726 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10727
10728 @@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10729 EXPORT_PER_CPU_SYMBOL(current_task);
10730
10731 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10732 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10733 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10734 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10735
10736 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10737 @@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str
10738 {
10739 memset(regs, 0, sizeof(struct pt_regs));
10740 regs->fs = __KERNEL_PERCPU;
10741 - regs->gs = __KERNEL_STACK_CANARY;
10742 + savesegment(gs, regs->gs);
10743
10744 return regs;
10745 }
10746 @@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void)
10747 int i;
10748
10749 cpu = stack_smp_processor_id();
10750 - t = &per_cpu(init_tss, cpu);
10751 + t = init_tss + cpu;
10752 oist = &per_cpu(orig_ist, cpu);
10753
10754 #ifdef CONFIG_NUMA
10755 @@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void)
10756 switch_to_new_gdt(cpu);
10757 loadsegment(fs, 0);
10758
10759 - load_idt((const struct desc_ptr *)&idt_descr);
10760 + load_idt(&idt_descr);
10761
10762 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10763 syscall_init();
10764 @@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void)
10765 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10766 barrier();
10767
10768 - x86_configure_nx();
10769 if (cpu != 0)
10770 enable_x2apic();
10771
10772 @@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
10773 {
10774 int cpu = smp_processor_id();
10775 struct task_struct *curr = current;
10776 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10777 + struct tss_struct *t = init_tss + cpu;
10778 struct thread_struct *thread = &curr->thread;
10779
10780 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10781 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/intel.c linux-2.6.39.4/arch/x86/kernel/cpu/intel.c
10782 --- linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400
10783 +++ linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-08-05 19:44:33.000000000 -0400
10784 @@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
10785 * Update the IDT descriptor and reload the IDT so that
10786 * it uses the read-only mapped virtual address.
10787 */
10788 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10789 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10790 load_idt(&idt_descr);
10791 }
10792 #endif
10793 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/Makefile linux-2.6.39.4/arch/x86/kernel/cpu/Makefile
10794 --- linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400
10795 +++ linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-08-05 19:44:33.000000000 -0400
10796 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10797 CFLAGS_REMOVE_perf_event.o = -pg
10798 endif
10799
10800 -# Make sure load_percpu_segment has no stackprotector
10801 -nostackp := $(call cc-option, -fno-stack-protector)
10802 -CFLAGS_common.o := $(nostackp)
10803 -
10804 obj-y := intel_cacheinfo.o scattered.o topology.o
10805 obj-y += proc.o capflags.o powerflags.o common.o
10806 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10807 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c
10808 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400
10809 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-05 19:44:33.000000000 -0400
10810 @@ -46,6 +46,7 @@
10811 #include <asm/ipi.h>
10812 #include <asm/mce.h>
10813 #include <asm/msr.h>
10814 +#include <asm/local.h>
10815
10816 #include "mce-internal.h"
10817
10818 @@ -220,7 +221,7 @@ static void print_mce(struct mce *m)
10819 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10820 m->cs, m->ip);
10821
10822 - if (m->cs == __KERNEL_CS)
10823 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10824 print_symbol("{%s}", m->ip);
10825 pr_cont("\n");
10826 }
10827 @@ -244,10 +245,10 @@ static void print_mce(struct mce *m)
10828
10829 #define PANIC_TIMEOUT 5 /* 5 seconds */
10830
10831 -static atomic_t mce_paniced;
10832 +static atomic_unchecked_t mce_paniced;
10833
10834 static int fake_panic;
10835 -static atomic_t mce_fake_paniced;
10836 +static atomic_unchecked_t mce_fake_paniced;
10837
10838 /* Panic in progress. Enable interrupts and wait for final IPI */
10839 static void wait_for_panic(void)
10840 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10841 /*
10842 * Make sure only one CPU runs in machine check panic
10843 */
10844 - if (atomic_inc_return(&mce_paniced) > 1)
10845 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10846 wait_for_panic();
10847 barrier();
10848
10849 @@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct
10850 console_verbose();
10851 } else {
10852 /* Don't log too much for fake panic */
10853 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10854 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10855 return;
10856 }
10857 /* First print corrected ones that are still unlogged */
10858 @@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t)
10859 * might have been modified by someone else.
10860 */
10861 rmb();
10862 - if (atomic_read(&mce_paniced))
10863 + if (atomic_read_unchecked(&mce_paniced))
10864 wait_for_panic();
10865 if (!monarch_timeout)
10866 goto out;
10867 @@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10868 */
10869
10870 static DEFINE_SPINLOCK(mce_state_lock);
10871 -static int open_count; /* #times opened */
10872 +static local_t open_count; /* #times opened */
10873 static int open_exclu; /* already open exclusive? */
10874
10875 static int mce_open(struct inode *inode, struct file *file)
10876 {
10877 spin_lock(&mce_state_lock);
10878
10879 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10880 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10881 spin_unlock(&mce_state_lock);
10882
10883 return -EBUSY;
10884 @@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode,
10885
10886 if (file->f_flags & O_EXCL)
10887 open_exclu = 1;
10888 - open_count++;
10889 + local_inc(&open_count);
10890
10891 spin_unlock(&mce_state_lock);
10892
10893 @@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino
10894 {
10895 spin_lock(&mce_state_lock);
10896
10897 - open_count--;
10898 + local_dec(&open_count);
10899 open_exclu = 0;
10900
10901 spin_unlock(&mce_state_lock);
10902 @@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void)
10903 static void mce_reset(void)
10904 {
10905 cpu_missing = 0;
10906 - atomic_set(&mce_fake_paniced, 0);
10907 + atomic_set_unchecked(&mce_fake_paniced, 0);
10908 atomic_set(&mce_executing, 0);
10909 atomic_set(&mce_callin, 0);
10910 atomic_set(&global_nwo, 0);
10911 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10912 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-05-19 00:06:34.000000000 -0400
10913 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:34:06.000000000 -0400
10914 @@ -215,7 +215,9 @@ static int inject_init(void)
10915 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10916 return -ENOMEM;
10917 printk(KERN_INFO "Machine check injector initialized\n");
10918 - mce_chrdev_ops.write = mce_write;
10919 + pax_open_kernel();
10920 + *(void **)&mce_chrdev_ops.write = mce_write;
10921 + pax_close_kernel();
10922 register_die_notifier(&mce_raise_nb);
10923 return 0;
10924 }
10925 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c
10926 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400
10927 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-05 19:44:33.000000000 -0400
10928 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10929 u64 size_or_mask, size_and_mask;
10930 static bool mtrr_aps_delayed_init;
10931
10932 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10933 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10934
10935 const struct mtrr_ops *mtrr_if;
10936
10937 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10938 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400
10939 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-05 20:34:06.000000000 -0400
10940 @@ -12,8 +12,8 @@
10941 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10942
10943 struct mtrr_ops {
10944 - u32 vendor;
10945 - u32 use_intel_if;
10946 + const u32 vendor;
10947 + const u32 use_intel_if;
10948 void (*set)(unsigned int reg, unsigned long base,
10949 unsigned long size, mtrr_type type);
10950 void (*set_all)(void);
10951 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c
10952 --- linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400
10953 +++ linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-08-05 19:44:33.000000000 -0400
10954 @@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp
10955 int i, j, w, wmax, num = 0;
10956 struct hw_perf_event *hwc;
10957
10958 + pax_track_stack();
10959 +
10960 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10961
10962 for (i = 0; i < n; i++) {
10963 @@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai
10964 break;
10965
10966 perf_callchain_store(entry, frame.return_address);
10967 - fp = frame.next_frame;
10968 + fp = (__force const void __user *)frame.next_frame;
10969 }
10970 }
10971
10972 diff -urNp linux-2.6.39.4/arch/x86/kernel/crash.c linux-2.6.39.4/arch/x86/kernel/crash.c
10973 --- linux-2.6.39.4/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400
10974 +++ linux-2.6.39.4/arch/x86/kernel/crash.c 2011-08-05 19:44:33.000000000 -0400
10975 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10976 regs = args->regs;
10977
10978 #ifdef CONFIG_X86_32
10979 - if (!user_mode_vm(regs)) {
10980 + if (!user_mode(regs)) {
10981 crash_fixup_ss_esp(&fixed_regs, regs);
10982 regs = &fixed_regs;
10983 }
10984 diff -urNp linux-2.6.39.4/arch/x86/kernel/doublefault_32.c linux-2.6.39.4/arch/x86/kernel/doublefault_32.c
10985 --- linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400
10986 +++ linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-08-05 19:44:33.000000000 -0400
10987 @@ -11,7 +11,7 @@
10988
10989 #define DOUBLEFAULT_STACKSIZE (1024)
10990 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10991 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10992 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10993
10994 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10995
10996 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10997 unsigned long gdt, tss;
10998
10999 store_gdt(&gdt_desc);
11000 - gdt = gdt_desc.address;
11001 + gdt = (unsigned long)gdt_desc.address;
11002
11003 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11004
11005 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11006 /* 0x2 bit is always set */
11007 .flags = X86_EFLAGS_SF | 0x2,
11008 .sp = STACK_START,
11009 - .es = __USER_DS,
11010 + .es = __KERNEL_DS,
11011 .cs = __KERNEL_CS,
11012 .ss = __KERNEL_DS,
11013 - .ds = __USER_DS,
11014 + .ds = __KERNEL_DS,
11015 .fs = __KERNEL_PERCPU,
11016
11017 .__cr3 = __pa_nodebug(swapper_pg_dir),
11018 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c
11019 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400
11020 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-08-05 19:44:33.000000000 -0400
11021 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11022 bp = stack_frame(task, regs);
11023
11024 for (;;) {
11025 - struct thread_info *context;
11026 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11027
11028 - context = (struct thread_info *)
11029 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11030 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11031 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11032
11033 - stack = (unsigned long *)context->previous_esp;
11034 - if (!stack)
11035 + if (stack_start == task_stack_page(task))
11036 break;
11037 + stack = *(unsigned long **)stack_start;
11038 if (ops->stack(data, "IRQ") < 0)
11039 break;
11040 touch_nmi_watchdog();
11041 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11042 * When in-kernel, we also print out the stack and code at the
11043 * time of the fault..
11044 */
11045 - if (!user_mode_vm(regs)) {
11046 + if (!user_mode(regs)) {
11047 unsigned int code_prologue = code_bytes * 43 / 64;
11048 unsigned int code_len = code_bytes;
11049 unsigned char c;
11050 u8 *ip;
11051 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11052
11053 printk(KERN_EMERG "Stack:\n");
11054 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11055
11056 printk(KERN_EMERG "Code: ");
11057
11058 - ip = (u8 *)regs->ip - code_prologue;
11059 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11060 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11061 /* try starting at IP */
11062 - ip = (u8 *)regs->ip;
11063 + ip = (u8 *)regs->ip + cs_base;
11064 code_len = code_len - code_prologue + 1;
11065 }
11066 for (i = 0; i < code_len; i++, ip++) {
11067 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11068 printk(" Bad EIP value.");
11069 break;
11070 }
11071 - if (ip == (u8 *)regs->ip)
11072 + if (ip == (u8 *)regs->ip + cs_base)
11073 printk("<%02x> ", c);
11074 else
11075 printk("%02x ", c);
11076 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11077 {
11078 unsigned short ud2;
11079
11080 + ip = ktla_ktva(ip);
11081 if (ip < PAGE_OFFSET)
11082 return 0;
11083 if (probe_kernel_address((unsigned short *)ip, ud2))
11084 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c
11085 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400
11086 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-08-05 19:44:33.000000000 -0400
11087 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11088 unsigned long *irq_stack_end =
11089 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11090 unsigned used = 0;
11091 - struct thread_info *tinfo;
11092 int graph = 0;
11093 unsigned long dummy;
11094 + void *stack_start;
11095
11096 if (!task)
11097 task = current;
11098 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11099 * current stack address. If the stacks consist of nested
11100 * exceptions
11101 */
11102 - tinfo = task_thread_info(task);
11103 for (;;) {
11104 char *id;
11105 unsigned long *estack_end;
11106 +
11107 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11108 &used, &id);
11109
11110 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11111 if (ops->stack(data, id) < 0)
11112 break;
11113
11114 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11115 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11116 data, estack_end, &graph);
11117 ops->stack(data, "<EOE>");
11118 /*
11119 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11120 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11121 if (ops->stack(data, "IRQ") < 0)
11122 break;
11123 - bp = ops->walk_stack(tinfo, stack, bp,
11124 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11125 ops, data, irq_stack_end, &graph);
11126 /*
11127 * We link to the next stack (which would be
11128 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11129 /*
11130 * This handles the process stack:
11131 */
11132 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11133 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11134 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11135 put_cpu();
11136 }
11137 EXPORT_SYMBOL(dump_trace);
11138 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack.c linux-2.6.39.4/arch/x86/kernel/dumpstack.c
11139 --- linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400
11140 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-08-05 19:44:33.000000000 -0400
11141 @@ -2,6 +2,9 @@
11142 * Copyright (C) 1991, 1992 Linus Torvalds
11143 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11144 */
11145 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11146 +#define __INCLUDED_BY_HIDESYM 1
11147 +#endif
11148 #include <linux/kallsyms.h>
11149 #include <linux/kprobes.h>
11150 #include <linux/uaccess.h>
11151 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11152 static void
11153 print_ftrace_graph_addr(unsigned long addr, void *data,
11154 const struct stacktrace_ops *ops,
11155 - struct thread_info *tinfo, int *graph)
11156 + struct task_struct *task, int *graph)
11157 {
11158 - struct task_struct *task = tinfo->task;
11159 unsigned long ret_addr;
11160 int index = task->curr_ret_stack;
11161
11162 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11163 static inline void
11164 print_ftrace_graph_addr(unsigned long addr, void *data,
11165 const struct stacktrace_ops *ops,
11166 - struct thread_info *tinfo, int *graph)
11167 + struct task_struct *task, int *graph)
11168 { }
11169 #endif
11170
11171 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11172 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11173 */
11174
11175 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11176 - void *p, unsigned int size, void *end)
11177 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11178 {
11179 - void *t = tinfo;
11180 if (end) {
11181 if (p < end && p >= (end-THREAD_SIZE))
11182 return 1;
11183 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11184 }
11185
11186 unsigned long
11187 -print_context_stack(struct thread_info *tinfo,
11188 +print_context_stack(struct task_struct *task, void *stack_start,
11189 unsigned long *stack, unsigned long bp,
11190 const struct stacktrace_ops *ops, void *data,
11191 unsigned long *end, int *graph)
11192 {
11193 struct stack_frame *frame = (struct stack_frame *)bp;
11194
11195 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11196 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11197 unsigned long addr;
11198
11199 addr = *stack;
11200 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11201 } else {
11202 ops->address(data, addr, 0);
11203 }
11204 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11205 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11206 }
11207 stack++;
11208 }
11209 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11210 EXPORT_SYMBOL_GPL(print_context_stack);
11211
11212 unsigned long
11213 -print_context_stack_bp(struct thread_info *tinfo,
11214 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11215 unsigned long *stack, unsigned long bp,
11216 const struct stacktrace_ops *ops, void *data,
11217 unsigned long *end, int *graph)
11218 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11219 struct stack_frame *frame = (struct stack_frame *)bp;
11220 unsigned long *ret_addr = &frame->return_address;
11221
11222 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11223 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11224 unsigned long addr = *ret_addr;
11225
11226 if (!__kernel_text_address(addr))
11227 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11228 ops->address(data, addr, 1);
11229 frame = frame->next_frame;
11230 ret_addr = &frame->return_address;
11231 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11232 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11233 }
11234
11235 return (unsigned long)frame;
11236 @@ -202,7 +202,7 @@ void dump_stack(void)
11237
11238 bp = stack_frame(current, NULL);
11239 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11240 - current->pid, current->comm, print_tainted(),
11241 + task_pid_nr(current), current->comm, print_tainted(),
11242 init_utsname()->release,
11243 (int)strcspn(init_utsname()->version, " "),
11244 init_utsname()->version);
11245 @@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void)
11246 }
11247 EXPORT_SYMBOL_GPL(oops_begin);
11248
11249 +extern void gr_handle_kernel_exploit(void);
11250 +
11251 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11252 {
11253 if (regs && kexec_should_crash(current))
11254 @@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl
11255 panic("Fatal exception in interrupt");
11256 if (panic_on_oops)
11257 panic("Fatal exception");
11258 - do_exit(signr);
11259 +
11260 + gr_handle_kernel_exploit();
11261 +
11262 + do_group_exit(signr);
11263 }
11264
11265 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11266 @@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str
11267
11268 show_registers(regs);
11269 #ifdef CONFIG_X86_32
11270 - if (user_mode_vm(regs)) {
11271 + if (user_mode(regs)) {
11272 sp = regs->sp;
11273 ss = regs->ss & 0xffff;
11274 } else {
11275 @@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs
11276 unsigned long flags = oops_begin();
11277 int sig = SIGSEGV;
11278
11279 - if (!user_mode_vm(regs))
11280 + if (!user_mode(regs))
11281 report_bug(regs->ip, regs);
11282
11283 if (__die(str, regs, err))
11284 diff -urNp linux-2.6.39.4/arch/x86/kernel/early_printk.c linux-2.6.39.4/arch/x86/kernel/early_printk.c
11285 --- linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400
11286 +++ linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-08-05 19:44:33.000000000 -0400
11287 @@ -7,6 +7,7 @@
11288 #include <linux/pci_regs.h>
11289 #include <linux/pci_ids.h>
11290 #include <linux/errno.h>
11291 +#include <linux/sched.h>
11292 #include <asm/io.h>
11293 #include <asm/processor.h>
11294 #include <asm/fcntl.h>
11295 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11296 int n;
11297 va_list ap;
11298
11299 + pax_track_stack();
11300 +
11301 va_start(ap, fmt);
11302 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11303 early_console->write(early_console, buf, n);
11304 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_32.S linux-2.6.39.4/arch/x86/kernel/entry_32.S
11305 --- linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400
11306 +++ linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-08-05 19:44:33.000000000 -0400
11307 @@ -185,13 +185,146 @@
11308 /*CFI_REL_OFFSET gs, PT_GS*/
11309 .endm
11310 .macro SET_KERNEL_GS reg
11311 +
11312 +#ifdef CONFIG_CC_STACKPROTECTOR
11313 movl $(__KERNEL_STACK_CANARY), \reg
11314 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11315 + movl $(__USER_DS), \reg
11316 +#else
11317 + xorl \reg, \reg
11318 +#endif
11319 +
11320 movl \reg, %gs
11321 .endm
11322
11323 #endif /* CONFIG_X86_32_LAZY_GS */
11324
11325 -.macro SAVE_ALL
11326 +.macro pax_enter_kernel
11327 +#ifdef CONFIG_PAX_KERNEXEC
11328 + call pax_enter_kernel
11329 +#endif
11330 +.endm
11331 +
11332 +.macro pax_exit_kernel
11333 +#ifdef CONFIG_PAX_KERNEXEC
11334 + call pax_exit_kernel
11335 +#endif
11336 +.endm
11337 +
11338 +#ifdef CONFIG_PAX_KERNEXEC
11339 +ENTRY(pax_enter_kernel)
11340 +#ifdef CONFIG_PARAVIRT
11341 + pushl %eax
11342 + pushl %ecx
11343 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11344 + mov %eax, %esi
11345 +#else
11346 + mov %cr0, %esi
11347 +#endif
11348 + bts $16, %esi
11349 + jnc 1f
11350 + mov %cs, %esi
11351 + cmp $__KERNEL_CS, %esi
11352 + jz 3f
11353 + ljmp $__KERNEL_CS, $3f
11354 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11355 +2:
11356 +#ifdef CONFIG_PARAVIRT
11357 + mov %esi, %eax
11358 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11359 +#else
11360 + mov %esi, %cr0
11361 +#endif
11362 +3:
11363 +#ifdef CONFIG_PARAVIRT
11364 + popl %ecx
11365 + popl %eax
11366 +#endif
11367 + ret
11368 +ENDPROC(pax_enter_kernel)
11369 +
11370 +ENTRY(pax_exit_kernel)
11371 +#ifdef CONFIG_PARAVIRT
11372 + pushl %eax
11373 + pushl %ecx
11374 +#endif
11375 + mov %cs, %esi
11376 + cmp $__KERNEXEC_KERNEL_CS, %esi
11377 + jnz 2f
11378 +#ifdef CONFIG_PARAVIRT
11379 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11380 + mov %eax, %esi
11381 +#else
11382 + mov %cr0, %esi
11383 +#endif
11384 + btr $16, %esi
11385 + ljmp $__KERNEL_CS, $1f
11386 +1:
11387 +#ifdef CONFIG_PARAVIRT
11388 + mov %esi, %eax
11389 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11390 +#else
11391 + mov %esi, %cr0
11392 +#endif
11393 +2:
11394 +#ifdef CONFIG_PARAVIRT
11395 + popl %ecx
11396 + popl %eax
11397 +#endif
11398 + ret
11399 +ENDPROC(pax_exit_kernel)
11400 +#endif
11401 +
11402 +.macro pax_erase_kstack
11403 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11404 + call pax_erase_kstack
11405 +#endif
11406 +.endm
11407 +
11408 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11409 +/*
11410 + * ebp: thread_info
11411 + * ecx, edx: can be clobbered
11412 + */
11413 +ENTRY(pax_erase_kstack)
11414 + pushl %edi
11415 + pushl %eax
11416 +
11417 + mov TI_lowest_stack(%ebp), %edi
11418 + mov $-0xBEEF, %eax
11419 + std
11420 +
11421 +1: mov %edi, %ecx
11422 + and $THREAD_SIZE_asm - 1, %ecx
11423 + shr $2, %ecx
11424 + repne scasl
11425 + jecxz 2f
11426 +
11427 + cmp $2*16, %ecx
11428 + jc 2f
11429 +
11430 + mov $2*16, %ecx
11431 + repe scasl
11432 + jecxz 2f
11433 + jne 1b
11434 +
11435 +2: cld
11436 + mov %esp, %ecx
11437 + sub %edi, %ecx
11438 + shr $2, %ecx
11439 + rep stosl
11440 +
11441 + mov TI_task_thread_sp0(%ebp), %edi
11442 + sub $128, %edi
11443 + mov %edi, TI_lowest_stack(%ebp)
11444 +
11445 + popl %eax
11446 + popl %edi
11447 + ret
11448 +ENDPROC(pax_erase_kstack)
11449 +#endif
11450 +
11451 +.macro __SAVE_ALL _DS
11452 cld
11453 PUSH_GS
11454 pushl_cfi %fs
11455 @@ -214,7 +347,7 @@
11456 CFI_REL_OFFSET ecx, 0
11457 pushl_cfi %ebx
11458 CFI_REL_OFFSET ebx, 0
11459 - movl $(__USER_DS), %edx
11460 + movl $\_DS, %edx
11461 movl %edx, %ds
11462 movl %edx, %es
11463 movl $(__KERNEL_PERCPU), %edx
11464 @@ -222,6 +355,15 @@
11465 SET_KERNEL_GS %edx
11466 .endm
11467
11468 +.macro SAVE_ALL
11469 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11470 + __SAVE_ALL __KERNEL_DS
11471 + pax_enter_kernel
11472 +#else
11473 + __SAVE_ALL __USER_DS
11474 +#endif
11475 +.endm
11476 +
11477 .macro RESTORE_INT_REGS
11478 popl_cfi %ebx
11479 CFI_RESTORE ebx
11480 @@ -332,7 +474,15 @@ check_userspace:
11481 movb PT_CS(%esp), %al
11482 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11483 cmpl $USER_RPL, %eax
11484 +
11485 +#ifdef CONFIG_PAX_KERNEXEC
11486 + jae resume_userspace
11487 +
11488 + PAX_EXIT_KERNEL
11489 + jmp resume_kernel
11490 +#else
11491 jb resume_kernel # not returning to v8086 or userspace
11492 +#endif
11493
11494 ENTRY(resume_userspace)
11495 LOCKDEP_SYS_EXIT
11496 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11497 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11498 # int/exception return?
11499 jne work_pending
11500 - jmp restore_all
11501 + jmp restore_all_pax
11502 END(ret_from_exception)
11503
11504 #ifdef CONFIG_PREEMPT
11505 @@ -394,23 +544,34 @@ sysenter_past_esp:
11506 /*CFI_REL_OFFSET cs, 0*/
11507 /*
11508 * Push current_thread_info()->sysenter_return to the stack.
11509 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11510 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11511 */
11512 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11513 + pushl_cfi $0
11514 CFI_REL_OFFSET eip, 0
11515
11516 pushl_cfi %eax
11517 SAVE_ALL
11518 + GET_THREAD_INFO(%ebp)
11519 + movl TI_sysenter_return(%ebp),%ebp
11520 + movl %ebp,PT_EIP(%esp)
11521 ENABLE_INTERRUPTS(CLBR_NONE)
11522
11523 /*
11524 * Load the potential sixth argument from user stack.
11525 * Careful about security.
11526 */
11527 + movl PT_OLDESP(%esp),%ebp
11528 +
11529 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11530 + mov PT_OLDSS(%esp),%ds
11531 +1: movl %ds:(%ebp),%ebp
11532 + push %ss
11533 + pop %ds
11534 +#else
11535 cmpl $__PAGE_OFFSET-3,%ebp
11536 jae syscall_fault
11537 1: movl (%ebp),%ebp
11538 +#endif
11539 +
11540 movl %ebp,PT_EBP(%esp)
11541 .section __ex_table,"a"
11542 .align 4
11543 @@ -433,12 +594,23 @@ sysenter_do_call:
11544 testl $_TIF_ALLWORK_MASK, %ecx
11545 jne sysexit_audit
11546 sysenter_exit:
11547 +
11548 +#ifdef CONFIG_PAX_RANDKSTACK
11549 + pushl_cfi %eax
11550 + call pax_randomize_kstack
11551 + popl_cfi %eax
11552 +#endif
11553 +
11554 + pax_erase_kstack
11555 +
11556 /* if something modifies registers it must also disable sysexit */
11557 movl PT_EIP(%esp), %edx
11558 movl PT_OLDESP(%esp), %ecx
11559 xorl %ebp,%ebp
11560 TRACE_IRQS_ON
11561 1: mov PT_FS(%esp), %fs
11562 +2: mov PT_DS(%esp), %ds
11563 +3: mov PT_ES(%esp), %es
11564 PTGS_TO_GS
11565 ENABLE_INTERRUPTS_SYSEXIT
11566
11567 @@ -455,6 +627,9 @@ sysenter_audit:
11568 movl %eax,%edx /* 2nd arg: syscall number */
11569 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11570 call audit_syscall_entry
11571 +
11572 + pax_erase_kstack
11573 +
11574 pushl_cfi %ebx
11575 movl PT_EAX(%esp),%eax /* reload syscall number */
11576 jmp sysenter_do_call
11577 @@ -481,11 +656,17 @@ sysexit_audit:
11578
11579 CFI_ENDPROC
11580 .pushsection .fixup,"ax"
11581 -2: movl $0,PT_FS(%esp)
11582 +4: movl $0,PT_FS(%esp)
11583 + jmp 1b
11584 +5: movl $0,PT_DS(%esp)
11585 + jmp 1b
11586 +6: movl $0,PT_ES(%esp)
11587 jmp 1b
11588 .section __ex_table,"a"
11589 .align 4
11590 - .long 1b,2b
11591 + .long 1b,4b
11592 + .long 2b,5b
11593 + .long 3b,6b
11594 .popsection
11595 PTGS_TO_GS_EX
11596 ENDPROC(ia32_sysenter_target)
11597 @@ -518,6 +699,14 @@ syscall_exit:
11598 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11599 jne syscall_exit_work
11600
11601 +restore_all_pax:
11602 +
11603 +#ifdef CONFIG_PAX_RANDKSTACK
11604 + call pax_randomize_kstack
11605 +#endif
11606 +
11607 + pax_erase_kstack
11608 +
11609 restore_all:
11610 TRACE_IRQS_IRET
11611 restore_all_notrace:
11612 @@ -577,14 +766,21 @@ ldt_ss:
11613 * compensating for the offset by changing to the ESPFIX segment with
11614 * a base address that matches for the difference.
11615 */
11616 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11617 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11618 mov %esp, %edx /* load kernel esp */
11619 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11620 mov %dx, %ax /* eax: new kernel esp */
11621 sub %eax, %edx /* offset (low word is 0) */
11622 +#ifdef CONFIG_SMP
11623 + movl PER_CPU_VAR(cpu_number), %ebx
11624 + shll $PAGE_SHIFT_asm, %ebx
11625 + addl $cpu_gdt_table, %ebx
11626 +#else
11627 + movl $cpu_gdt_table, %ebx
11628 +#endif
11629 shr $16, %edx
11630 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11631 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11632 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11633 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11634 pushl_cfi $__ESPFIX_SS
11635 pushl_cfi %eax /* new kernel esp */
11636 /* Disable interrupts, but do not irqtrace this section: we
11637 @@ -613,29 +809,23 @@ work_resched:
11638 movl TI_flags(%ebp), %ecx
11639 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11640 # than syscall tracing?
11641 - jz restore_all
11642 + jz restore_all_pax
11643 testb $_TIF_NEED_RESCHED, %cl
11644 jnz work_resched
11645
11646 work_notifysig: # deal with pending signals and
11647 # notify-resume requests
11648 + movl %esp, %eax
11649 #ifdef CONFIG_VM86
11650 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11651 - movl %esp, %eax
11652 - jne work_notifysig_v86 # returning to kernel-space or
11653 + jz 1f # returning to kernel-space or
11654 # vm86-space
11655 - xorl %edx, %edx
11656 - call do_notify_resume
11657 - jmp resume_userspace_sig
11658
11659 - ALIGN
11660 -work_notifysig_v86:
11661 pushl_cfi %ecx # save ti_flags for do_notify_resume
11662 call save_v86_state # %eax contains pt_regs pointer
11663 popl_cfi %ecx
11664 movl %eax, %esp
11665 -#else
11666 - movl %esp, %eax
11667 +1:
11668 #endif
11669 xorl %edx, %edx
11670 call do_notify_resume
11671 @@ -648,6 +838,9 @@ syscall_trace_entry:
11672 movl $-ENOSYS,PT_EAX(%esp)
11673 movl %esp, %eax
11674 call syscall_trace_enter
11675 +
11676 + pax_erase_kstack
11677 +
11678 /* What it returned is what we'll actually use. */
11679 cmpl $(nr_syscalls), %eax
11680 jnae syscall_call
11681 @@ -670,6 +863,10 @@ END(syscall_exit_work)
11682
11683 RING0_INT_FRAME # can't unwind into user space anyway
11684 syscall_fault:
11685 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11686 + push %ss
11687 + pop %ds
11688 +#endif
11689 GET_THREAD_INFO(%ebp)
11690 movl $-EFAULT,PT_EAX(%esp)
11691 jmp resume_userspace
11692 @@ -752,6 +949,36 @@ ptregs_clone:
11693 CFI_ENDPROC
11694 ENDPROC(ptregs_clone)
11695
11696 + ALIGN;
11697 +ENTRY(kernel_execve)
11698 + CFI_STARTPROC
11699 + pushl_cfi %ebp
11700 + sub $PT_OLDSS+4,%esp
11701 + pushl_cfi %edi
11702 + pushl_cfi %ecx
11703 + pushl_cfi %eax
11704 + lea 3*4(%esp),%edi
11705 + mov $PT_OLDSS/4+1,%ecx
11706 + xorl %eax,%eax
11707 + rep stosl
11708 + popl_cfi %eax
11709 + popl_cfi %ecx
11710 + popl_cfi %edi
11711 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11712 + pushl_cfi %esp
11713 + call sys_execve
11714 + add $4,%esp
11715 + CFI_ADJUST_CFA_OFFSET -4
11716 + GET_THREAD_INFO(%ebp)
11717 + test %eax,%eax
11718 + jz syscall_exit
11719 + add $PT_OLDSS+4,%esp
11720 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11721 + popl_cfi %ebp
11722 + ret
11723 + CFI_ENDPROC
11724 +ENDPROC(kernel_execve)
11725 +
11726 .macro FIXUP_ESPFIX_STACK
11727 /*
11728 * Switch back for ESPFIX stack to the normal zerobased stack
11729 @@ -761,8 +988,15 @@ ENDPROC(ptregs_clone)
11730 * normal stack and adjusts ESP with the matching offset.
11731 */
11732 /* fixup the stack */
11733 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11734 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11735 +#ifdef CONFIG_SMP
11736 + movl PER_CPU_VAR(cpu_number), %ebx
11737 + shll $PAGE_SHIFT_asm, %ebx
11738 + addl $cpu_gdt_table, %ebx
11739 +#else
11740 + movl $cpu_gdt_table, %ebx
11741 +#endif
11742 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11743 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11744 shl $16, %eax
11745 addl %esp, %eax /* the adjusted stack pointer */
11746 pushl_cfi $__KERNEL_DS
11747 @@ -1213,7 +1447,6 @@ return_to_handler:
11748 jmp *%ecx
11749 #endif
11750
11751 -.section .rodata,"a"
11752 #include "syscall_table_32.S"
11753
11754 syscall_table_size=(.-sys_call_table)
11755 @@ -1259,9 +1492,12 @@ error_code:
11756 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11757 REG_TO_PTGS %ecx
11758 SET_KERNEL_GS %ecx
11759 - movl $(__USER_DS), %ecx
11760 + movl $(__KERNEL_DS), %ecx
11761 movl %ecx, %ds
11762 movl %ecx, %es
11763 +
11764 + pax_enter_kernel
11765 +
11766 TRACE_IRQS_OFF
11767 movl %esp,%eax # pt_regs pointer
11768 call *%edi
11769 @@ -1346,6 +1582,9 @@ nmi_stack_correct:
11770 xorl %edx,%edx # zero error code
11771 movl %esp,%eax # pt_regs pointer
11772 call do_nmi
11773 +
11774 + pax_exit_kernel
11775 +
11776 jmp restore_all_notrace
11777 CFI_ENDPROC
11778
11779 @@ -1382,6 +1621,9 @@ nmi_espfix_stack:
11780 FIXUP_ESPFIX_STACK # %eax == %esp
11781 xorl %edx,%edx # zero error code
11782 call do_nmi
11783 +
11784 + pax_exit_kernel
11785 +
11786 RESTORE_REGS
11787 lss 12+4(%esp), %esp # back to espfix stack
11788 CFI_ADJUST_CFA_OFFSET -24
11789 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_64.S linux-2.6.39.4/arch/x86/kernel/entry_64.S
11790 --- linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400
11791 +++ linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-08-05 19:44:33.000000000 -0400
11792 @@ -53,6 +53,7 @@
11793 #include <asm/paravirt.h>
11794 #include <asm/ftrace.h>
11795 #include <asm/percpu.h>
11796 +#include <asm/pgtable.h>
11797
11798 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11799 #include <linux/elf-em.h>
11800 @@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11801 ENDPROC(native_usergs_sysret64)
11802 #endif /* CONFIG_PARAVIRT */
11803
11804 + .macro ljmpq sel, off
11805 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11806 + .byte 0x48; ljmp *1234f(%rip)
11807 + .pushsection .rodata
11808 + .align 16
11809 + 1234: .quad \off; .word \sel
11810 + .popsection
11811 +#else
11812 + pushq $\sel
11813 + pushq $\off
11814 + lretq
11815 +#endif
11816 + .endm
11817 +
11818 + .macro pax_enter_kernel
11819 +#ifdef CONFIG_PAX_KERNEXEC
11820 + call pax_enter_kernel
11821 +#endif
11822 + .endm
11823 +
11824 + .macro pax_exit_kernel
11825 +#ifdef CONFIG_PAX_KERNEXEC
11826 + call pax_exit_kernel
11827 +#endif
11828 + .endm
11829 +
11830 +#ifdef CONFIG_PAX_KERNEXEC
11831 +ENTRY(pax_enter_kernel)
11832 + pushq %rdi
11833 +
11834 +#ifdef CONFIG_PARAVIRT
11835 + PV_SAVE_REGS(CLBR_RDI)
11836 +#endif
11837 +
11838 + GET_CR0_INTO_RDI
11839 + bts $16,%rdi
11840 + jnc 1f
11841 + mov %cs,%edi
11842 + cmp $__KERNEL_CS,%edi
11843 + jz 3f
11844 + ljmpq __KERNEL_CS,3f
11845 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11846 +2: SET_RDI_INTO_CR0
11847 +3:
11848 +
11849 +#ifdef CONFIG_PARAVIRT
11850 + PV_RESTORE_REGS(CLBR_RDI)
11851 +#endif
11852 +
11853 + popq %rdi
11854 + retq
11855 +ENDPROC(pax_enter_kernel)
11856 +
11857 +ENTRY(pax_exit_kernel)
11858 + pushq %rdi
11859 +
11860 +#ifdef CONFIG_PARAVIRT
11861 + PV_SAVE_REGS(CLBR_RDI)
11862 +#endif
11863 +
11864 + mov %cs,%rdi
11865 + cmp $__KERNEXEC_KERNEL_CS,%edi
11866 + jnz 2f
11867 + GET_CR0_INTO_RDI
11868 + btr $16,%rdi
11869 + ljmpq __KERNEL_CS,1f
11870 +1: SET_RDI_INTO_CR0
11871 +2:
11872 +
11873 +#ifdef CONFIG_PARAVIRT
11874 + PV_RESTORE_REGS(CLBR_RDI);
11875 +#endif
11876 +
11877 + popq %rdi
11878 + retq
11879 +ENDPROC(pax_exit_kernel)
11880 +#endif
11881 +
11882 + .macro pax_enter_kernel_user
11883 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11884 + call pax_enter_kernel_user
11885 +#endif
11886 + .endm
11887 +
11888 + .macro pax_exit_kernel_user
11889 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11890 + call pax_exit_kernel_user
11891 +#endif
11892 +#ifdef CONFIG_PAX_RANDKSTACK
11893 + push %rax
11894 + call pax_randomize_kstack
11895 + pop %rax
11896 +#endif
11897 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11898 + call pax_erase_kstack
11899 +#endif
11900 + .endm
11901 +
11902 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11903 +ENTRY(pax_enter_kernel_user)
11904 + pushq %rdi
11905 + pushq %rbx
11906 +
11907 +#ifdef CONFIG_PARAVIRT
11908 + PV_SAVE_REGS(CLBR_RDI)
11909 +#endif
11910 +
11911 + GET_CR3_INTO_RDI
11912 + mov %rdi,%rbx
11913 + add $__START_KERNEL_map,%rbx
11914 + sub phys_base(%rip),%rbx
11915 +
11916 +#ifdef CONFIG_PARAVIRT
11917 + pushq %rdi
11918 + cmpl $0, pv_info+PARAVIRT_enabled
11919 + jz 1f
11920 + i = 0
11921 + .rept USER_PGD_PTRS
11922 + mov i*8(%rbx),%rsi
11923 + mov $0,%sil
11924 + lea i*8(%rbx),%rdi
11925 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11926 + i = i + 1
11927 + .endr
11928 + jmp 2f
11929 +1:
11930 +#endif
11931 +
11932 + i = 0
11933 + .rept USER_PGD_PTRS
11934 + movb $0,i*8(%rbx)
11935 + i = i + 1
11936 + .endr
11937 +
11938 +#ifdef CONFIG_PARAVIRT
11939 +2: popq %rdi
11940 +#endif
11941 + SET_RDI_INTO_CR3
11942 +
11943 +#ifdef CONFIG_PAX_KERNEXEC
11944 + GET_CR0_INTO_RDI
11945 + bts $16,%rdi
11946 + SET_RDI_INTO_CR0
11947 +#endif
11948 +
11949 +#ifdef CONFIG_PARAVIRT
11950 + PV_RESTORE_REGS(CLBR_RDI)
11951 +#endif
11952 +
11953 + popq %rbx
11954 + popq %rdi
11955 + retq
11956 +ENDPROC(pax_enter_kernel_user)
11957 +
11958 +ENTRY(pax_exit_kernel_user)
11959 + push %rdi
11960 +
11961 +#ifdef CONFIG_PARAVIRT
11962 + pushq %rbx
11963 + PV_SAVE_REGS(CLBR_RDI)
11964 +#endif
11965 +
11966 +#ifdef CONFIG_PAX_KERNEXEC
11967 + GET_CR0_INTO_RDI
11968 + btr $16,%rdi
11969 + SET_RDI_INTO_CR0
11970 +#endif
11971 +
11972 + GET_CR3_INTO_RDI
11973 + add $__START_KERNEL_map,%rdi
11974 + sub phys_base(%rip),%rdi
11975 +
11976 +#ifdef CONFIG_PARAVIRT
11977 + cmpl $0, pv_info+PARAVIRT_enabled
11978 + jz 1f
11979 + mov %rdi,%rbx
11980 + i = 0
11981 + .rept USER_PGD_PTRS
11982 + mov i*8(%rbx),%rsi
11983 + mov $0x67,%sil
11984 + lea i*8(%rbx),%rdi
11985 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11986 + i = i + 1
11987 + .endr
11988 + jmp 2f
11989 +1:
11990 +#endif
11991 +
11992 + i = 0
11993 + .rept USER_PGD_PTRS
11994 + movb $0x67,i*8(%rdi)
11995 + i = i + 1
11996 + .endr
11997 +
11998 +#ifdef CONFIG_PARAVIRT
11999 +2: PV_RESTORE_REGS(CLBR_RDI)
12000 + popq %rbx
12001 +#endif
12002 +
12003 + popq %rdi
12004 + retq
12005 +ENDPROC(pax_exit_kernel_user)
12006 +#endif
12007 +
12008 + .macro pax_erase_kstack
12009 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12010 + call pax_erase_kstack
12011 +#endif
12012 + .endm
12013 +
12014 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12015 +/*
12016 + * r10: thread_info
12017 + * rcx, rdx: can be clobbered
12018 + */
12019 +ENTRY(pax_erase_kstack)
12020 + pushq %rdi
12021 + pushq %rax
12022 +
12023 + GET_THREAD_INFO(%r10)
12024 + mov TI_lowest_stack(%r10), %rdi
12025 + mov $-0xBEEF, %rax
12026 + std
12027 +
12028 +1: mov %edi, %ecx
12029 + and $THREAD_SIZE_asm - 1, %ecx
12030 + shr $3, %ecx
12031 + repne scasq
12032 + jecxz 2f
12033 +
12034 + cmp $2*8, %ecx
12035 + jc 2f
12036 +
12037 + mov $2*8, %ecx
12038 + repe scasq
12039 + jecxz 2f
12040 + jne 1b
12041 +
12042 +2: cld
12043 + mov %esp, %ecx
12044 + sub %edi, %ecx
12045 + shr $3, %ecx
12046 + rep stosq
12047 +
12048 + mov TI_task_thread_sp0(%r10), %rdi
12049 + sub $256, %rdi
12050 + mov %rdi, TI_lowest_stack(%r10)
12051 +
12052 + popq %rax
12053 + popq %rdi
12054 + ret
12055 +ENDPROC(pax_erase_kstack)
12056 +#endif
12057
12058 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12059 #ifdef CONFIG_TRACE_IRQFLAGS
12060 @@ -318,7 +572,7 @@ ENTRY(save_args)
12061 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12062 movq_cfi rbp, 8 /* push %rbp */
12063 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12064 - testl $3, CS(%rdi)
12065 + testb $3, CS(%rdi)
12066 je 1f
12067 SWAPGS
12068 /*
12069 @@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12070
12071 RESTORE_REST
12072
12073 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12074 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12075 je int_ret_from_sys_call
12076
12077 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12078 @@ -455,7 +709,7 @@ END(ret_from_fork)
12079 ENTRY(system_call)
12080 CFI_STARTPROC simple
12081 CFI_SIGNAL_FRAME
12082 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12083 + CFI_DEF_CFA rsp,0
12084 CFI_REGISTER rip,rcx
12085 /*CFI_REGISTER rflags,r11*/
12086 SWAPGS_UNSAFE_STACK
12087 @@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12088
12089 movq %rsp,PER_CPU_VAR(old_rsp)
12090 movq PER_CPU_VAR(kernel_stack),%rsp
12091 + pax_enter_kernel_user
12092 /*
12093 * No need to follow this irqs off/on section - it's straight
12094 * and short:
12095 */
12096 ENABLE_INTERRUPTS(CLBR_NONE)
12097 - SAVE_ARGS 8,1
12098 + SAVE_ARGS 8*6,1
12099 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12100 movq %rcx,RIP-ARGOFFSET(%rsp)
12101 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12102 @@ -502,6 +757,7 @@ sysret_check:
12103 andl %edi,%edx
12104 jnz sysret_careful
12105 CFI_REMEMBER_STATE
12106 + pax_exit_kernel_user
12107 /*
12108 * sysretq will re-enable interrupts:
12109 */
12110 @@ -560,6 +816,9 @@ auditsys:
12111 movq %rax,%rsi /* 2nd arg: syscall number */
12112 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12113 call audit_syscall_entry
12114 +
12115 + pax_erase_kstack
12116 +
12117 LOAD_ARGS 0 /* reload call-clobbered registers */
12118 jmp system_call_fastpath
12119
12120 @@ -590,6 +849,9 @@ tracesys:
12121 FIXUP_TOP_OF_STACK %rdi
12122 movq %rsp,%rdi
12123 call syscall_trace_enter
12124 +
12125 + pax_erase_kstack
12126 +
12127 /*
12128 * Reload arg registers from stack in case ptrace changed them.
12129 * We don't reload %rax because syscall_trace_enter() returned
12130 @@ -611,7 +873,7 @@ tracesys:
12131 GLOBAL(int_ret_from_sys_call)
12132 DISABLE_INTERRUPTS(CLBR_NONE)
12133 TRACE_IRQS_OFF
12134 - testl $3,CS-ARGOFFSET(%rsp)
12135 + testb $3,CS-ARGOFFSET(%rsp)
12136 je retint_restore_args
12137 movl $_TIF_ALLWORK_MASK,%edi
12138 /* edi: mask to check */
12139 @@ -793,6 +1055,16 @@ END(interrupt)
12140 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12141 call save_args
12142 PARTIAL_FRAME 0
12143 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12144 + testb $3, CS(%rdi)
12145 + jnz 1f
12146 + pax_enter_kernel
12147 + jmp 2f
12148 +1: pax_enter_kernel_user
12149 +2:
12150 +#else
12151 + pax_enter_kernel
12152 +#endif
12153 call \func
12154 .endm
12155
12156 @@ -825,7 +1097,7 @@ ret_from_intr:
12157 CFI_ADJUST_CFA_OFFSET -8
12158 exit_intr:
12159 GET_THREAD_INFO(%rcx)
12160 - testl $3,CS-ARGOFFSET(%rsp)
12161 + testb $3,CS-ARGOFFSET(%rsp)
12162 je retint_kernel
12163
12164 /* Interrupt came from user space */
12165 @@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12166 * The iretq could re-enable interrupts:
12167 */
12168 DISABLE_INTERRUPTS(CLBR_ANY)
12169 + pax_exit_kernel_user
12170 TRACE_IRQS_IRETQ
12171 SWAPGS
12172 jmp restore_args
12173
12174 retint_restore_args: /* return to kernel space */
12175 DISABLE_INTERRUPTS(CLBR_ANY)
12176 + pax_exit_kernel
12177 /*
12178 * The iretq could re-enable interrupts:
12179 */
12180 @@ -1027,6 +1301,16 @@ ENTRY(\sym)
12181 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12182 call error_entry
12183 DEFAULT_FRAME 0
12184 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12185 + testb $3, CS(%rsp)
12186 + jnz 1f
12187 + pax_enter_kernel
12188 + jmp 2f
12189 +1: pax_enter_kernel_user
12190 +2:
12191 +#else
12192 + pax_enter_kernel
12193 +#endif
12194 movq %rsp,%rdi /* pt_regs pointer */
12195 xorl %esi,%esi /* no error code */
12196 call \do_sym
12197 @@ -1044,6 +1328,16 @@ ENTRY(\sym)
12198 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12199 call save_paranoid
12200 TRACE_IRQS_OFF
12201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12202 + testb $3, CS(%rsp)
12203 + jnz 1f
12204 + pax_enter_kernel
12205 + jmp 2f
12206 +1: pax_enter_kernel_user
12207 +2:
12208 +#else
12209 + pax_enter_kernel
12210 +#endif
12211 movq %rsp,%rdi /* pt_regs pointer */
12212 xorl %esi,%esi /* no error code */
12213 call \do_sym
12214 @@ -1052,7 +1346,7 @@ ENTRY(\sym)
12215 END(\sym)
12216 .endm
12217
12218 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12219 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12220 .macro paranoidzeroentry_ist sym do_sym ist
12221 ENTRY(\sym)
12222 INTR_FRAME
12223 @@ -1062,8 +1356,24 @@ ENTRY(\sym)
12224 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12225 call save_paranoid
12226 TRACE_IRQS_OFF
12227 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12228 + testb $3, CS(%rsp)
12229 + jnz 1f
12230 + pax_enter_kernel
12231 + jmp 2f
12232 +1: pax_enter_kernel_user
12233 +2:
12234 +#else
12235 + pax_enter_kernel
12236 +#endif
12237 movq %rsp,%rdi /* pt_regs pointer */
12238 xorl %esi,%esi /* no error code */
12239 +#ifdef CONFIG_SMP
12240 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12241 + lea init_tss(%r12), %r12
12242 +#else
12243 + lea init_tss(%rip), %r12
12244 +#endif
12245 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12246 call \do_sym
12247 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12248 @@ -1080,6 +1390,16 @@ ENTRY(\sym)
12249 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12250 call error_entry
12251 DEFAULT_FRAME 0
12252 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12253 + testb $3, CS(%rsp)
12254 + jnz 1f
12255 + pax_enter_kernel
12256 + jmp 2f
12257 +1: pax_enter_kernel_user
12258 +2:
12259 +#else
12260 + pax_enter_kernel
12261 +#endif
12262 movq %rsp,%rdi /* pt_regs pointer */
12263 movq ORIG_RAX(%rsp),%rsi /* get error code */
12264 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12265 @@ -1099,6 +1419,16 @@ ENTRY(\sym)
12266 call save_paranoid
12267 DEFAULT_FRAME 0
12268 TRACE_IRQS_OFF
12269 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12270 + testb $3, CS(%rsp)
12271 + jnz 1f
12272 + pax_enter_kernel
12273 + jmp 2f
12274 +1: pax_enter_kernel_user
12275 +2:
12276 +#else
12277 + pax_enter_kernel
12278 +#endif
12279 movq %rsp,%rdi /* pt_regs pointer */
12280 movq ORIG_RAX(%rsp),%rsi /* get error code */
12281 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12282 @@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12283 TRACE_IRQS_OFF
12284 testl %ebx,%ebx /* swapgs needed? */
12285 jnz paranoid_restore
12286 - testl $3,CS(%rsp)
12287 + testb $3,CS(%rsp)
12288 jnz paranoid_userspace
12289 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12290 + pax_exit_kernel
12291 + TRACE_IRQS_IRETQ 0
12292 + SWAPGS_UNSAFE_STACK
12293 + RESTORE_ALL 8
12294 + jmp irq_return
12295 +#endif
12296 paranoid_swapgs:
12297 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12298 + pax_exit_kernel_user
12299 +#else
12300 + pax_exit_kernel
12301 +#endif
12302 TRACE_IRQS_IRETQ 0
12303 SWAPGS_UNSAFE_STACK
12304 RESTORE_ALL 8
12305 jmp irq_return
12306 paranoid_restore:
12307 + pax_exit_kernel
12308 TRACE_IRQS_IRETQ 0
12309 RESTORE_ALL 8
12310 jmp irq_return
12311 @@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12312 movq_cfi r14, R14+8
12313 movq_cfi r15, R15+8
12314 xorl %ebx,%ebx
12315 - testl $3,CS+8(%rsp)
12316 + testb $3,CS+8(%rsp)
12317 je error_kernelspace
12318 error_swapgs:
12319 SWAPGS
12320 @@ -1490,6 +1833,16 @@ ENTRY(nmi)
12321 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12322 call save_paranoid
12323 DEFAULT_FRAME 0
12324 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12325 + testb $3, CS(%rsp)
12326 + jnz 1f
12327 + pax_enter_kernel
12328 + jmp 2f
12329 +1: pax_enter_kernel_user
12330 +2:
12331 +#else
12332 + pax_enter_kernel
12333 +#endif
12334 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12335 movq %rsp,%rdi
12336 movq $-1,%rsi
12337 @@ -1500,11 +1853,25 @@ ENTRY(nmi)
12338 DISABLE_INTERRUPTS(CLBR_NONE)
12339 testl %ebx,%ebx /* swapgs needed? */
12340 jnz nmi_restore
12341 - testl $3,CS(%rsp)
12342 + testb $3,CS(%rsp)
12343 jnz nmi_userspace
12344 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12345 + pax_exit_kernel
12346 + SWAPGS_UNSAFE_STACK
12347 + RESTORE_ALL 8
12348 + jmp irq_return
12349 +#endif
12350 nmi_swapgs:
12351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12352 + pax_exit_kernel_user
12353 +#else
12354 + pax_exit_kernel
12355 +#endif
12356 SWAPGS_UNSAFE_STACK
12357 + RESTORE_ALL 8
12358 + jmp irq_return
12359 nmi_restore:
12360 + pax_exit_kernel
12361 RESTORE_ALL 8
12362 jmp irq_return
12363 nmi_userspace:
12364 diff -urNp linux-2.6.39.4/arch/x86/kernel/ftrace.c linux-2.6.39.4/arch/x86/kernel/ftrace.c
12365 --- linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400
12366 +++ linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-08-05 19:44:33.000000000 -0400
12367 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12368 static void *mod_code_newcode; /* holds the text to write to the IP */
12369
12370 static unsigned nmi_wait_count;
12371 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12372 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12373
12374 int ftrace_arch_read_dyn_info(char *buf, int size)
12375 {
12376 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12377
12378 r = snprintf(buf, size, "%u %u",
12379 nmi_wait_count,
12380 - atomic_read(&nmi_update_count));
12381 + atomic_read_unchecked(&nmi_update_count));
12382 return r;
12383 }
12384
12385 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12386
12387 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12388 smp_rmb();
12389 + pax_open_kernel();
12390 ftrace_mod_code();
12391 - atomic_inc(&nmi_update_count);
12392 + pax_close_kernel();
12393 + atomic_inc_unchecked(&nmi_update_count);
12394 }
12395 /* Must have previous changes seen before executions */
12396 smp_mb();
12397 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12398 {
12399 unsigned char replaced[MCOUNT_INSN_SIZE];
12400
12401 + ip = ktla_ktva(ip);
12402 +
12403 /*
12404 * Note: Due to modules and __init, code can
12405 * disappear and change, we need to protect against faulting
12406 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12407 unsigned char old[MCOUNT_INSN_SIZE], *new;
12408 int ret;
12409
12410 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12411 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12412 new = ftrace_call_replace(ip, (unsigned long)func);
12413 ret = ftrace_modify_code(ip, old, new);
12414
12415 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12416 {
12417 unsigned char code[MCOUNT_INSN_SIZE];
12418
12419 + ip = ktla_ktva(ip);
12420 +
12421 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12422 return -EFAULT;
12423
12424 diff -urNp linux-2.6.39.4/arch/x86/kernel/head32.c linux-2.6.39.4/arch/x86/kernel/head32.c
12425 --- linux-2.6.39.4/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400
12426 +++ linux-2.6.39.4/arch/x86/kernel/head32.c 2011-08-05 19:44:33.000000000 -0400
12427 @@ -19,6 +19,7 @@
12428 #include <asm/io_apic.h>
12429 #include <asm/bios_ebda.h>
12430 #include <asm/tlbflush.h>
12431 +#include <asm/boot.h>
12432
12433 static void __init i386_default_early_setup(void)
12434 {
12435 @@ -34,7 +35,7 @@ void __init i386_start_kernel(void)
12436 {
12437 memblock_init();
12438
12439 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12440 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12441
12442 #ifdef CONFIG_BLK_DEV_INITRD
12443 /* Reserve INITRD */
12444 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_32.S linux-2.6.39.4/arch/x86/kernel/head_32.S
12445 --- linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400
12446 +++ linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-08-05 19:44:33.000000000 -0400
12447 @@ -25,6 +25,12 @@
12448 /* Physical address */
12449 #define pa(X) ((X) - __PAGE_OFFSET)
12450
12451 +#ifdef CONFIG_PAX_KERNEXEC
12452 +#define ta(X) (X)
12453 +#else
12454 +#define ta(X) ((X) - __PAGE_OFFSET)
12455 +#endif
12456 +
12457 /*
12458 * References to members of the new_cpu_data structure.
12459 */
12460 @@ -54,11 +60,7 @@
12461 * and small than max_low_pfn, otherwise will waste some page table entries
12462 */
12463
12464 -#if PTRS_PER_PMD > 1
12465 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12466 -#else
12467 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12468 -#endif
12469 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12470
12471 /* Number of possible pages in the lowmem region */
12472 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12473 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12474 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12475
12476 /*
12477 + * Real beginning of normal "text" segment
12478 + */
12479 +ENTRY(stext)
12480 +ENTRY(_stext)
12481 +
12482 +/*
12483 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12484 * %esi points to the real-mode code as a 32-bit pointer.
12485 * CS and DS must be 4 GB flat segments, but we don't depend on
12486 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12487 * can.
12488 */
12489 __HEAD
12490 +
12491 +#ifdef CONFIG_PAX_KERNEXEC
12492 + jmp startup_32
12493 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12494 +.fill PAGE_SIZE-5,1,0xcc
12495 +#endif
12496 +
12497 ENTRY(startup_32)
12498 movl pa(stack_start),%ecx
12499
12500 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12501 2:
12502 leal -__PAGE_OFFSET(%ecx),%esp
12503
12504 +#ifdef CONFIG_SMP
12505 + movl $pa(cpu_gdt_table),%edi
12506 + movl $__per_cpu_load,%eax
12507 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12508 + rorl $16,%eax
12509 + movb %al,__KERNEL_PERCPU + 4(%edi)
12510 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12511 + movl $__per_cpu_end - 1,%eax
12512 + subl $__per_cpu_start,%eax
12513 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12514 +#endif
12515 +
12516 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12517 + movl $NR_CPUS,%ecx
12518 + movl $pa(cpu_gdt_table),%edi
12519 +1:
12520 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12521 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12522 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12523 + addl $PAGE_SIZE_asm,%edi
12524 + loop 1b
12525 +#endif
12526 +
12527 +#ifdef CONFIG_PAX_KERNEXEC
12528 + movl $pa(boot_gdt),%edi
12529 + movl $__LOAD_PHYSICAL_ADDR,%eax
12530 + movw %ax,__BOOT_CS + 2(%edi)
12531 + rorl $16,%eax
12532 + movb %al,__BOOT_CS + 4(%edi)
12533 + movb %ah,__BOOT_CS + 7(%edi)
12534 + rorl $16,%eax
12535 +
12536 + ljmp $(__BOOT_CS),$1f
12537 +1:
12538 +
12539 + movl $NR_CPUS,%ecx
12540 + movl $pa(cpu_gdt_table),%edi
12541 + addl $__PAGE_OFFSET,%eax
12542 +1:
12543 + movw %ax,__KERNEL_CS + 2(%edi)
12544 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12545 + rorl $16,%eax
12546 + movb %al,__KERNEL_CS + 4(%edi)
12547 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12548 + movb %ah,__KERNEL_CS + 7(%edi)
12549 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12550 + rorl $16,%eax
12551 + addl $PAGE_SIZE_asm,%edi
12552 + loop 1b
12553 +#endif
12554 +
12555 /*
12556 * Clear BSS first so that there are no surprises...
12557 */
12558 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12559 movl %eax, pa(max_pfn_mapped)
12560
12561 /* Do early initialization of the fixmap area */
12562 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12563 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12564 +#ifdef CONFIG_COMPAT_VDSO
12565 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12566 +#else
12567 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12568 +#endif
12569 #else /* Not PAE */
12570
12571 page_pde_offset = (__PAGE_OFFSET >> 20);
12572 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12573 movl %eax, pa(max_pfn_mapped)
12574
12575 /* Do early initialization of the fixmap area */
12576 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12577 - movl %eax,pa(initial_page_table+0xffc)
12578 +#ifdef CONFIG_COMPAT_VDSO
12579 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12580 +#else
12581 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12582 +#endif
12583 #endif
12584
12585 #ifdef CONFIG_PARAVIRT
12586 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12587 cmpl $num_subarch_entries, %eax
12588 jae bad_subarch
12589
12590 - movl pa(subarch_entries)(,%eax,4), %eax
12591 - subl $__PAGE_OFFSET, %eax
12592 - jmp *%eax
12593 + jmp *pa(subarch_entries)(,%eax,4)
12594
12595 bad_subarch:
12596 WEAK(lguest_entry)
12597 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12598 __INITDATA
12599
12600 subarch_entries:
12601 - .long default_entry /* normal x86/PC */
12602 - .long lguest_entry /* lguest hypervisor */
12603 - .long xen_entry /* Xen hypervisor */
12604 - .long default_entry /* Moorestown MID */
12605 + .long ta(default_entry) /* normal x86/PC */
12606 + .long ta(lguest_entry) /* lguest hypervisor */
12607 + .long ta(xen_entry) /* Xen hypervisor */
12608 + .long ta(default_entry) /* Moorestown MID */
12609 num_subarch_entries = (. - subarch_entries) / 4
12610 .previous
12611 #else
12612 @@ -312,6 +382,7 @@ default_entry:
12613 orl %edx,%eax
12614 movl %eax,%cr4
12615
12616 +#ifdef CONFIG_X86_PAE
12617 testb $X86_CR4_PAE, %al # check if PAE is enabled
12618 jz 6f
12619
12620 @@ -340,6 +411,9 @@ default_entry:
12621 /* Make changes effective */
12622 wrmsr
12623
12624 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12625 +#endif
12626 +
12627 6:
12628
12629 /*
12630 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12631 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12632 movl %eax,%ss # after changing gdt.
12633
12634 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12635 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12636 movl %eax,%ds
12637 movl %eax,%es
12638
12639 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12640 */
12641 cmpb $0,ready
12642 jne 1f
12643 - movl $gdt_page,%eax
12644 + movl $cpu_gdt_table,%eax
12645 movl $stack_canary,%ecx
12646 +#ifdef CONFIG_SMP
12647 + addl $__per_cpu_load,%ecx
12648 +#endif
12649 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12650 shrl $16, %ecx
12651 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12652 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12653 1:
12654 -#endif
12655 movl $(__KERNEL_STACK_CANARY),%eax
12656 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12657 + movl $(__USER_DS),%eax
12658 +#else
12659 + xorl %eax,%eax
12660 +#endif
12661 movl %eax,%gs
12662
12663 xorl %eax,%eax # Clear LDT
12664 @@ -558,22 +639,22 @@ early_page_fault:
12665 jmp early_fault
12666
12667 early_fault:
12668 - cld
12669 #ifdef CONFIG_PRINTK
12670 + cmpl $1,%ss:early_recursion_flag
12671 + je hlt_loop
12672 + incl %ss:early_recursion_flag
12673 + cld
12674 pusha
12675 movl $(__KERNEL_DS),%eax
12676 movl %eax,%ds
12677 movl %eax,%es
12678 - cmpl $2,early_recursion_flag
12679 - je hlt_loop
12680 - incl early_recursion_flag
12681 movl %cr2,%eax
12682 pushl %eax
12683 pushl %edx /* trapno */
12684 pushl $fault_msg
12685 call printk
12686 +; call dump_stack
12687 #endif
12688 - call dump_stack
12689 hlt_loop:
12690 hlt
12691 jmp hlt_loop
12692 @@ -581,8 +662,11 @@ hlt_loop:
12693 /* This is the default interrupt "handler" :-) */
12694 ALIGN
12695 ignore_int:
12696 - cld
12697 #ifdef CONFIG_PRINTK
12698 + cmpl $2,%ss:early_recursion_flag
12699 + je hlt_loop
12700 + incl %ss:early_recursion_flag
12701 + cld
12702 pushl %eax
12703 pushl %ecx
12704 pushl %edx
12705 @@ -591,9 +675,6 @@ ignore_int:
12706 movl $(__KERNEL_DS),%eax
12707 movl %eax,%ds
12708 movl %eax,%es
12709 - cmpl $2,early_recursion_flag
12710 - je hlt_loop
12711 - incl early_recursion_flag
12712 pushl 16(%esp)
12713 pushl 24(%esp)
12714 pushl 32(%esp)
12715 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12716 /*
12717 * BSS section
12718 */
12719 -__PAGE_ALIGNED_BSS
12720 - .align PAGE_SIZE
12721 #ifdef CONFIG_X86_PAE
12722 +.section .initial_pg_pmd,"a",@progbits
12723 initial_pg_pmd:
12724 .fill 1024*KPMDS,4,0
12725 #else
12726 +.section .initial_page_table,"a",@progbits
12727 ENTRY(initial_page_table)
12728 .fill 1024,4,0
12729 #endif
12730 +.section .initial_pg_fixmap,"a",@progbits
12731 initial_pg_fixmap:
12732 .fill 1024,4,0
12733 +.section .empty_zero_page,"a",@progbits
12734 ENTRY(empty_zero_page)
12735 .fill 4096,1,0
12736 +.section .swapper_pg_dir,"a",@progbits
12737 ENTRY(swapper_pg_dir)
12738 +#ifdef CONFIG_X86_PAE
12739 + .fill 4,8,0
12740 +#else
12741 .fill 1024,4,0
12742 +#endif
12743 +
12744 +/*
12745 + * The IDT has to be page-aligned to simplify the Pentium
12746 + * F0 0F bug workaround.. We have a special link segment
12747 + * for this.
12748 + */
12749 +.section .idt,"a",@progbits
12750 +ENTRY(idt_table)
12751 + .fill 256,8,0
12752
12753 /*
12754 * This starts the data section.
12755 */
12756 #ifdef CONFIG_X86_PAE
12757 -__PAGE_ALIGNED_DATA
12758 - /* Page-aligned for the benefit of paravirt? */
12759 - .align PAGE_SIZE
12760 +.section .initial_page_table,"a",@progbits
12761 ENTRY(initial_page_table)
12762 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12763 # if KPMDS == 3
12764 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12765 # error "Kernel PMDs should be 1, 2 or 3"
12766 # endif
12767 .align PAGE_SIZE /* needs to be page-sized too */
12768 +
12769 +#ifdef CONFIG_PAX_PER_CPU_PGD
12770 +ENTRY(cpu_pgd)
12771 + .rept NR_CPUS
12772 + .fill 4,8,0
12773 + .endr
12774 +#endif
12775 +
12776 #endif
12777
12778 .data
12779 .balign 4
12780 ENTRY(stack_start)
12781 - .long init_thread_union+THREAD_SIZE
12782 + .long init_thread_union+THREAD_SIZE-8
12783 +
12784 +ready: .byte 0
12785
12786 +.section .rodata,"a",@progbits
12787 early_recursion_flag:
12788 .long 0
12789
12790 -ready: .byte 0
12791 -
12792 int_msg:
12793 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12794
12795 @@ -707,7 +811,7 @@ fault_msg:
12796 .word 0 # 32 bit align gdt_desc.address
12797 boot_gdt_descr:
12798 .word __BOOT_DS+7
12799 - .long boot_gdt - __PAGE_OFFSET
12800 + .long pa(boot_gdt)
12801
12802 .word 0 # 32-bit align idt_desc.address
12803 idt_descr:
12804 @@ -718,7 +822,7 @@ idt_descr:
12805 .word 0 # 32 bit align gdt_desc.address
12806 ENTRY(early_gdt_descr)
12807 .word GDT_ENTRIES*8-1
12808 - .long gdt_page /* Overwritten for secondary CPUs */
12809 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12810
12811 /*
12812 * The boot_gdt must mirror the equivalent in setup.S and is
12813 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12814 .align L1_CACHE_BYTES
12815 ENTRY(boot_gdt)
12816 .fill GDT_ENTRY_BOOT_CS,8,0
12817 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12818 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12819 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12820 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12821 +
12822 + .align PAGE_SIZE_asm
12823 +ENTRY(cpu_gdt_table)
12824 + .rept NR_CPUS
12825 + .quad 0x0000000000000000 /* NULL descriptor */
12826 + .quad 0x0000000000000000 /* 0x0b reserved */
12827 + .quad 0x0000000000000000 /* 0x13 reserved */
12828 + .quad 0x0000000000000000 /* 0x1b reserved */
12829 +
12830 +#ifdef CONFIG_PAX_KERNEXEC
12831 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12832 +#else
12833 + .quad 0x0000000000000000 /* 0x20 unused */
12834 +#endif
12835 +
12836 + .quad 0x0000000000000000 /* 0x28 unused */
12837 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12838 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12839 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12840 + .quad 0x0000000000000000 /* 0x4b reserved */
12841 + .quad 0x0000000000000000 /* 0x53 reserved */
12842 + .quad 0x0000000000000000 /* 0x5b reserved */
12843 +
12844 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12845 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12846 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12847 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12848 +
12849 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12850 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12851 +
12852 + /*
12853 + * Segments used for calling PnP BIOS have byte granularity.
12854 + * The code segments and data segments have fixed 64k limits,
12855 + * the transfer segment sizes are set at run time.
12856 + */
12857 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12858 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12859 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12860 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12861 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12862 +
12863 + /*
12864 + * The APM segments have byte granularity and their bases
12865 + * are set at run time. All have 64k limits.
12866 + */
12867 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12868 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12869 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12870 +
12871 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12872 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12873 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12874 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12875 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12876 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12877 +
12878 + /* Be sure this is zeroed to avoid false validations in Xen */
12879 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12880 + .endr
12881 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_64.S linux-2.6.39.4/arch/x86/kernel/head_64.S
12882 --- linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400
12883 +++ linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-08-05 19:44:33.000000000 -0400
12884 @@ -19,6 +19,7 @@
12885 #include <asm/cache.h>
12886 #include <asm/processor-flags.h>
12887 #include <asm/percpu.h>
12888 +#include <asm/cpufeature.h>
12889
12890 #ifdef CONFIG_PARAVIRT
12891 #include <asm/asm-offsets.h>
12892 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12893 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12894 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12895 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12896 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12897 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12898 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12899 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12900
12901 .text
12902 __HEAD
12903 @@ -85,35 +90,22 @@ startup_64:
12904 */
12905 addq %rbp, init_level4_pgt + 0(%rip)
12906 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12907 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12908 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12909 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12910
12911 addq %rbp, level3_ident_pgt + 0(%rip)
12912 +#ifndef CONFIG_XEN
12913 + addq %rbp, level3_ident_pgt + 8(%rip)
12914 +#endif
12915
12916 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12917 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12918 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12919
12920 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12921 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12922 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12923
12924 - /* Add an Identity mapping if I am above 1G */
12925 - leaq _text(%rip), %rdi
12926 - andq $PMD_PAGE_MASK, %rdi
12927 -
12928 - movq %rdi, %rax
12929 - shrq $PUD_SHIFT, %rax
12930 - andq $(PTRS_PER_PUD - 1), %rax
12931 - jz ident_complete
12932 -
12933 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12934 - leaq level3_ident_pgt(%rip), %rbx
12935 - movq %rdx, 0(%rbx, %rax, 8)
12936 -
12937 - movq %rdi, %rax
12938 - shrq $PMD_SHIFT, %rax
12939 - andq $(PTRS_PER_PMD - 1), %rax
12940 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12941 - leaq level2_spare_pgt(%rip), %rbx
12942 - movq %rdx, 0(%rbx, %rax, 8)
12943 -ident_complete:
12944 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12945 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12946
12947 /*
12948 * Fixup the kernel text+data virtual addresses. Note that
12949 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12950 * after the boot processor executes this code.
12951 */
12952
12953 - /* Enable PAE mode and PGE */
12954 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12955 + /* Enable PAE mode and PSE/PGE */
12956 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12957 movq %rax, %cr4
12958
12959 /* Setup early boot stage 4 level pagetables. */
12960 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12961 movl $MSR_EFER, %ecx
12962 rdmsr
12963 btsl $_EFER_SCE, %eax /* Enable System Call */
12964 - btl $20,%edi /* No Execute supported? */
12965 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12966 jnc 1f
12967 btsl $_EFER_NX, %eax
12968 + leaq init_level4_pgt(%rip), %rdi
12969 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12970 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12971 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12972 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12973 1: wrmsr /* Make changes effective */
12974
12975 /* Setup cr0 */
12976 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12977 bad_address:
12978 jmp bad_address
12979
12980 - .section ".init.text","ax"
12981 + __INIT
12982 #ifdef CONFIG_EARLY_PRINTK
12983 .globl early_idt_handlers
12984 early_idt_handlers:
12985 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12986 #endif /* EARLY_PRINTK */
12987 1: hlt
12988 jmp 1b
12989 + .previous
12990
12991 #ifdef CONFIG_EARLY_PRINTK
12992 + __INITDATA
12993 early_recursion_flag:
12994 .long 0
12995 + .previous
12996
12997 + .section .rodata,"a",@progbits
12998 early_idt_msg:
12999 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13000 early_idt_ripmsg:
13001 .asciz "RIP %s\n"
13002 -#endif /* CONFIG_EARLY_PRINTK */
13003 .previous
13004 +#endif /* CONFIG_EARLY_PRINTK */
13005
13006 + .section .rodata,"a",@progbits
13007 #define NEXT_PAGE(name) \
13008 .balign PAGE_SIZE; \
13009 ENTRY(name)
13010 @@ -338,7 +340,6 @@ ENTRY(name)
13011 i = i + 1 ; \
13012 .endr
13013
13014 - .data
13015 /*
13016 * This default setting generates an ident mapping at address 0x100000
13017 * and a mapping for the kernel that precisely maps virtual address
13018 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13019 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13020 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13021 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13022 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
13023 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13024 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13025 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13026 .org init_level4_pgt + L4_START_KERNEL*8, 0
13027 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13028 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13029
13030 +#ifdef CONFIG_PAX_PER_CPU_PGD
13031 +NEXT_PAGE(cpu_pgd)
13032 + .rept NR_CPUS
13033 + .fill 512,8,0
13034 + .endr
13035 +#endif
13036 +
13037 NEXT_PAGE(level3_ident_pgt)
13038 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13039 +#ifdef CONFIG_XEN
13040 .fill 511,8,0
13041 +#else
13042 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13043 + .fill 510,8,0
13044 +#endif
13045 +
13046 +NEXT_PAGE(level3_vmalloc_pgt)
13047 + .fill 512,8,0
13048 +
13049 +NEXT_PAGE(level3_vmemmap_pgt)
13050 + .fill L3_VMEMMAP_START,8,0
13051 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13052
13053 NEXT_PAGE(level3_kernel_pgt)
13054 .fill L3_START_KERNEL,8,0
13055 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13056 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13057 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13058
13059 +NEXT_PAGE(level2_vmemmap_pgt)
13060 + .fill 512,8,0
13061 +
13062 NEXT_PAGE(level2_fixmap_pgt)
13063 - .fill 506,8,0
13064 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13065 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13066 - .fill 5,8,0
13067 + .fill 507,8,0
13068 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13069 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13070 + .fill 4,8,0
13071
13072 -NEXT_PAGE(level1_fixmap_pgt)
13073 +NEXT_PAGE(level1_vsyscall_pgt)
13074 .fill 512,8,0
13075
13076 -NEXT_PAGE(level2_ident_pgt)
13077 - /* Since I easily can, map the first 1G.
13078 + /* Since I easily can, map the first 2G.
13079 * Don't set NX because code runs from these pages.
13080 */
13081 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13082 +NEXT_PAGE(level2_ident_pgt)
13083 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13084
13085 NEXT_PAGE(level2_kernel_pgt)
13086 /*
13087 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13088 * If you want to increase this then increase MODULES_VADDR
13089 * too.)
13090 */
13091 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13092 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13093 -
13094 -NEXT_PAGE(level2_spare_pgt)
13095 - .fill 512, 8, 0
13096 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13097
13098 #undef PMDS
13099 #undef NEXT_PAGE
13100
13101 - .data
13102 + .align PAGE_SIZE
13103 +ENTRY(cpu_gdt_table)
13104 + .rept NR_CPUS
13105 + .quad 0x0000000000000000 /* NULL descriptor */
13106 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13107 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13108 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13109 + .quad 0x00cffb000000ffff /* __USER32_CS */
13110 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13111 + .quad 0x00affb000000ffff /* __USER_CS */
13112 +
13113 +#ifdef CONFIG_PAX_KERNEXEC
13114 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13115 +#else
13116 + .quad 0x0 /* unused */
13117 +#endif
13118 +
13119 + .quad 0,0 /* TSS */
13120 + .quad 0,0 /* LDT */
13121 + .quad 0,0,0 /* three TLS descriptors */
13122 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13123 + /* asm/segment.h:GDT_ENTRIES must match this */
13124 +
13125 + /* zero the remaining page */
13126 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13127 + .endr
13128 +
13129 .align 16
13130 .globl early_gdt_descr
13131 early_gdt_descr:
13132 .word GDT_ENTRIES*8-1
13133 early_gdt_descr_base:
13134 - .quad INIT_PER_CPU_VAR(gdt_page)
13135 + .quad cpu_gdt_table
13136
13137 ENTRY(phys_base)
13138 /* This must match the first entry in level2_kernel_pgt */
13139 .quad 0x0000000000000000
13140
13141 #include "../../x86/xen/xen-head.S"
13142 -
13143 - .section .bss, "aw", @nobits
13144 +
13145 + .section .rodata,"a",@progbits
13146 .align L1_CACHE_BYTES
13147 ENTRY(idt_table)
13148 - .skip IDT_ENTRIES * 16
13149 + .fill 512,8,0
13150
13151 __PAGE_ALIGNED_BSS
13152 .align PAGE_SIZE
13153 diff -urNp linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c
13154 --- linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400
13155 +++ linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-05 19:44:33.000000000 -0400
13156 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13157 EXPORT_SYMBOL(cmpxchg8b_emu);
13158 #endif
13159
13160 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13161 +
13162 /* Networking helper routines. */
13163 EXPORT_SYMBOL(csum_partial_copy_generic);
13164 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13165 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13166
13167 EXPORT_SYMBOL(__get_user_1);
13168 EXPORT_SYMBOL(__get_user_2);
13169 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13170
13171 EXPORT_SYMBOL(csum_partial);
13172 EXPORT_SYMBOL(empty_zero_page);
13173 +
13174 +#ifdef CONFIG_PAX_KERNEXEC
13175 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13176 +#endif
13177 diff -urNp linux-2.6.39.4/arch/x86/kernel/i8259.c linux-2.6.39.4/arch/x86/kernel/i8259.c
13178 --- linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400
13179 +++ linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-08-05 19:44:33.000000000 -0400
13180 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13181 "spurious 8259A interrupt: IRQ%d.\n", irq);
13182 spurious_irq_mask |= irqmask;
13183 }
13184 - atomic_inc(&irq_err_count);
13185 + atomic_inc_unchecked(&irq_err_count);
13186 /*
13187 * Theoretically we do not have to handle this IRQ,
13188 * but in Linux this does not cause problems and is
13189 diff -urNp linux-2.6.39.4/arch/x86/kernel/init_task.c linux-2.6.39.4/arch/x86/kernel/init_task.c
13190 --- linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400
13191 +++ linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-08-05 19:44:33.000000000 -0400
13192 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13193 * way process stacks are handled. This is done by having a special
13194 * "init_task" linker map entry..
13195 */
13196 -union thread_union init_thread_union __init_task_data =
13197 - { INIT_THREAD_INFO(init_task) };
13198 +union thread_union init_thread_union __init_task_data;
13199
13200 /*
13201 * Initial task structure.
13202 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13203 * section. Since TSS's are completely CPU-local, we want them
13204 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13205 */
13206 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13207 -
13208 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13209 +EXPORT_SYMBOL(init_tss);
13210 diff -urNp linux-2.6.39.4/arch/x86/kernel/ioport.c linux-2.6.39.4/arch/x86/kernel/ioport.c
13211 --- linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
13212 +++ linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-08-05 19:44:33.000000000 -0400
13213 @@ -6,6 +6,7 @@
13214 #include <linux/sched.h>
13215 #include <linux/kernel.h>
13216 #include <linux/capability.h>
13217 +#include <linux/security.h>
13218 #include <linux/errno.h>
13219 #include <linux/types.h>
13220 #include <linux/ioport.h>
13221 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13222
13223 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13224 return -EINVAL;
13225 +#ifdef CONFIG_GRKERNSEC_IO
13226 + if (turn_on && grsec_disable_privio) {
13227 + gr_handle_ioperm();
13228 + return -EPERM;
13229 + }
13230 +#endif
13231 if (turn_on && !capable(CAP_SYS_RAWIO))
13232 return -EPERM;
13233
13234 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13235 * because the ->io_bitmap_max value must match the bitmap
13236 * contents:
13237 */
13238 - tss = &per_cpu(init_tss, get_cpu());
13239 + tss = init_tss + get_cpu();
13240
13241 if (turn_on)
13242 bitmap_clear(t->io_bitmap_ptr, from, num);
13243 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13244 return -EINVAL;
13245 /* Trying to gain more privileges? */
13246 if (level > old) {
13247 +#ifdef CONFIG_GRKERNSEC_IO
13248 + if (grsec_disable_privio) {
13249 + gr_handle_iopl();
13250 + return -EPERM;
13251 + }
13252 +#endif
13253 if (!capable(CAP_SYS_RAWIO))
13254 return -EPERM;
13255 }
13256 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq_32.c linux-2.6.39.4/arch/x86/kernel/irq_32.c
13257 --- linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400
13258 +++ linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-08-05 19:44:33.000000000 -0400
13259 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13260 __asm__ __volatile__("andl %%esp,%0" :
13261 "=r" (sp) : "0" (THREAD_SIZE - 1));
13262
13263 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13264 + return sp < STACK_WARN;
13265 }
13266
13267 static void print_stack_overflow(void)
13268 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13269 * per-CPU IRQ handling contexts (thread information and stack)
13270 */
13271 union irq_ctx {
13272 - struct thread_info tinfo;
13273 - u32 stack[THREAD_SIZE/sizeof(u32)];
13274 + unsigned long previous_esp;
13275 + u32 stack[THREAD_SIZE/sizeof(u32)];
13276 } __attribute__((aligned(THREAD_SIZE)));
13277
13278 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13279 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13280 static inline int
13281 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13282 {
13283 - union irq_ctx *curctx, *irqctx;
13284 + union irq_ctx *irqctx;
13285 u32 *isp, arg1, arg2;
13286
13287 - curctx = (union irq_ctx *) current_thread_info();
13288 irqctx = __this_cpu_read(hardirq_ctx);
13289
13290 /*
13291 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13292 * handler) we can't do that and just have to keep using the
13293 * current stack (which is the irq stack already after all)
13294 */
13295 - if (unlikely(curctx == irqctx))
13296 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13297 return 0;
13298
13299 /* build the stack frame on the IRQ stack */
13300 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13301 - irqctx->tinfo.task = curctx->tinfo.task;
13302 - irqctx->tinfo.previous_esp = current_stack_pointer;
13303 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13304 + irqctx->previous_esp = current_stack_pointer;
13305
13306 - /*
13307 - * Copy the softirq bits in preempt_count so that the
13308 - * softirq checks work in the hardirq context.
13309 - */
13310 - irqctx->tinfo.preempt_count =
13311 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13312 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13313 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13314 + __set_fs(MAKE_MM_SEG(0));
13315 +#endif
13316
13317 if (unlikely(overflow))
13318 call_on_stack(print_stack_overflow, isp);
13319 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13320 : "0" (irq), "1" (desc), "2" (isp),
13321 "D" (desc->handle_irq)
13322 : "memory", "cc", "ecx");
13323 +
13324 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13325 + __set_fs(current_thread_info()->addr_limit);
13326 +#endif
13327 +
13328 return 1;
13329 }
13330
13331 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13332 */
13333 void __cpuinit irq_ctx_init(int cpu)
13334 {
13335 - union irq_ctx *irqctx;
13336 -
13337 if (per_cpu(hardirq_ctx, cpu))
13338 return;
13339
13340 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13341 - THREAD_FLAGS,
13342 - THREAD_ORDER));
13343 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13344 - irqctx->tinfo.cpu = cpu;
13345 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13346 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13347 -
13348 - per_cpu(hardirq_ctx, cpu) = irqctx;
13349 -
13350 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13351 - THREAD_FLAGS,
13352 - THREAD_ORDER));
13353 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13354 - irqctx->tinfo.cpu = cpu;
13355 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13356 -
13357 - per_cpu(softirq_ctx, cpu) = irqctx;
13358 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13359 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13360
13361 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13362 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13363 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13364 asmlinkage void do_softirq(void)
13365 {
13366 unsigned long flags;
13367 - struct thread_info *curctx;
13368 union irq_ctx *irqctx;
13369 u32 *isp;
13370
13371 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13372 local_irq_save(flags);
13373
13374 if (local_softirq_pending()) {
13375 - curctx = current_thread_info();
13376 irqctx = __this_cpu_read(softirq_ctx);
13377 - irqctx->tinfo.task = curctx->task;
13378 - irqctx->tinfo.previous_esp = current_stack_pointer;
13379 + irqctx->previous_esp = current_stack_pointer;
13380
13381 /* build the stack frame on the softirq stack */
13382 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13383 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13384 +
13385 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13386 + __set_fs(MAKE_MM_SEG(0));
13387 +#endif
13388
13389 call_on_stack(__do_softirq, isp);
13390 +
13391 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13392 + __set_fs(current_thread_info()->addr_limit);
13393 +#endif
13394 +
13395 /*
13396 * Shouldn't happen, we returned above if in_interrupt():
13397 */
13398 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq.c linux-2.6.39.4/arch/x86/kernel/irq.c
13399 --- linux-2.6.39.4/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400
13400 +++ linux-2.6.39.4/arch/x86/kernel/irq.c 2011-08-05 19:44:33.000000000 -0400
13401 @@ -17,7 +17,7 @@
13402 #include <asm/mce.h>
13403 #include <asm/hw_irq.h>
13404
13405 -atomic_t irq_err_count;
13406 +atomic_unchecked_t irq_err_count;
13407
13408 /* Function pointer for generic interrupt vector handling */
13409 void (*x86_platform_ipi_callback)(void) = NULL;
13410 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13411 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13412 seq_printf(p, " Machine check polls\n");
13413 #endif
13414 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13415 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13416 #if defined(CONFIG_X86_IO_APIC)
13417 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13418 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13419 #endif
13420 return 0;
13421 }
13422 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13423
13424 u64 arch_irq_stat(void)
13425 {
13426 - u64 sum = atomic_read(&irq_err_count);
13427 + u64 sum = atomic_read_unchecked(&irq_err_count);
13428
13429 #ifdef CONFIG_X86_IO_APIC
13430 - sum += atomic_read(&irq_mis_count);
13431 + sum += atomic_read_unchecked(&irq_mis_count);
13432 #endif
13433 return sum;
13434 }
13435 diff -urNp linux-2.6.39.4/arch/x86/kernel/kgdb.c linux-2.6.39.4/arch/x86/kernel/kgdb.c
13436 --- linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
13437 +++ linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-08-05 20:34:06.000000000 -0400
13438 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13439 #ifdef CONFIG_X86_32
13440 switch (regno) {
13441 case GDB_SS:
13442 - if (!user_mode_vm(regs))
13443 + if (!user_mode(regs))
13444 *(unsigned long *)mem = __KERNEL_DS;
13445 break;
13446 case GDB_SP:
13447 - if (!user_mode_vm(regs))
13448 + if (!user_mode(regs))
13449 *(unsigned long *)mem = kernel_stack_pointer(regs);
13450 break;
13451 case GDB_GS:
13452 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13453 case 'k':
13454 /* clear the trace bit */
13455 linux_regs->flags &= ~X86_EFLAGS_TF;
13456 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13457 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13458
13459 /* set the trace bit if we're stepping */
13460 if (remcomInBuffer[0] == 's') {
13461 linux_regs->flags |= X86_EFLAGS_TF;
13462 - atomic_set(&kgdb_cpu_doing_single_step,
13463 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13464 raw_smp_processor_id());
13465 }
13466
13467 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13468 return NOTIFY_DONE;
13469
13470 case DIE_DEBUG:
13471 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13472 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13473 if (user_mode(regs))
13474 return single_step_cont(regs, args);
13475 break;
13476 diff -urNp linux-2.6.39.4/arch/x86/kernel/kprobes.c linux-2.6.39.4/arch/x86/kernel/kprobes.c
13477 --- linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
13478 +++ linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-08-05 19:44:33.000000000 -0400
13479 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13480 } __attribute__((packed)) *insn;
13481
13482 insn = (struct __arch_relative_insn *)from;
13483 +
13484 + pax_open_kernel();
13485 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13486 insn->op = op;
13487 + pax_close_kernel();
13488 }
13489
13490 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13491 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13492 kprobe_opcode_t opcode;
13493 kprobe_opcode_t *orig_opcodes = opcodes;
13494
13495 - if (search_exception_tables((unsigned long)opcodes))
13496 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13497 return 0; /* Page fault may occur on this address. */
13498
13499 retry:
13500 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13501 }
13502 }
13503 insn_get_length(&insn);
13504 + pax_open_kernel();
13505 memcpy(dest, insn.kaddr, insn.length);
13506 + pax_close_kernel();
13507
13508 #ifdef CONFIG_X86_64
13509 if (insn_rip_relative(&insn)) {
13510 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13511 (u8 *) dest;
13512 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13513 disp = (u8 *) dest + insn_offset_displacement(&insn);
13514 + pax_open_kernel();
13515 *(s32 *) disp = (s32) newdisp;
13516 + pax_close_kernel();
13517 }
13518 #endif
13519 return insn.length;
13520 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13521 */
13522 __copy_instruction(p->ainsn.insn, p->addr, 0);
13523
13524 - if (can_boost(p->addr))
13525 + if (can_boost(ktla_ktva(p->addr)))
13526 p->ainsn.boostable = 0;
13527 else
13528 p->ainsn.boostable = -1;
13529
13530 - p->opcode = *p->addr;
13531 + p->opcode = *(ktla_ktva(p->addr));
13532 }
13533
13534 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13535 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13536 * nor set current_kprobe, because it doesn't use single
13537 * stepping.
13538 */
13539 - regs->ip = (unsigned long)p->ainsn.insn;
13540 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13541 preempt_enable_no_resched();
13542 return;
13543 }
13544 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13545 if (p->opcode == BREAKPOINT_INSTRUCTION)
13546 regs->ip = (unsigned long)p->addr;
13547 else
13548 - regs->ip = (unsigned long)p->ainsn.insn;
13549 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13550 }
13551
13552 /*
13553 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13554 setup_singlestep(p, regs, kcb, 0);
13555 return 1;
13556 }
13557 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13558 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13559 /*
13560 * The breakpoint instruction was removed right
13561 * after we hit it. Another cpu has removed
13562 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13563 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13564 {
13565 unsigned long *tos = stack_addr(regs);
13566 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13567 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13568 unsigned long orig_ip = (unsigned long)p->addr;
13569 kprobe_opcode_t *insn = p->ainsn.insn;
13570
13571 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13572 struct die_args *args = data;
13573 int ret = NOTIFY_DONE;
13574
13575 - if (args->regs && user_mode_vm(args->regs))
13576 + if (args->regs && user_mode(args->regs))
13577 return ret;
13578
13579 switch (val) {
13580 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13581 * Verify if the address gap is in 2GB range, because this uses
13582 * a relative jump.
13583 */
13584 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13585 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13586 if (abs(rel) > 0x7fffffff)
13587 return -ERANGE;
13588
13589 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13590 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13591
13592 /* Set probe function call */
13593 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13594 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13595
13596 /* Set returning jmp instruction at the tail of out-of-line buffer */
13597 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13598 - (u8 *)op->kp.addr + op->optinsn.size);
13599 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13600
13601 flush_icache_range((unsigned long) buf,
13602 (unsigned long) buf + TMPL_END_IDX +
13603 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13604 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13605
13606 /* Backup instructions which will be replaced by jump address */
13607 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13608 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13609 RELATIVE_ADDR_SIZE);
13610
13611 insn_buf[0] = RELATIVEJUMP_OPCODE;
13612 diff -urNp linux-2.6.39.4/arch/x86/kernel/ldt.c linux-2.6.39.4/arch/x86/kernel/ldt.c
13613 --- linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400
13614 +++ linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-08-05 19:44:33.000000000 -0400
13615 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13616 if (reload) {
13617 #ifdef CONFIG_SMP
13618 preempt_disable();
13619 - load_LDT(pc);
13620 + load_LDT_nolock(pc);
13621 if (!cpumask_equal(mm_cpumask(current->mm),
13622 cpumask_of(smp_processor_id())))
13623 smp_call_function(flush_ldt, current->mm, 1);
13624 preempt_enable();
13625 #else
13626 - load_LDT(pc);
13627 + load_LDT_nolock(pc);
13628 #endif
13629 }
13630 if (oldsize) {
13631 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13632 return err;
13633
13634 for (i = 0; i < old->size; i++)
13635 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13636 + write_ldt_entry(new->ldt, i, old->ldt + i);
13637 return 0;
13638 }
13639
13640 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13641 retval = copy_ldt(&mm->context, &old_mm->context);
13642 mutex_unlock(&old_mm->context.lock);
13643 }
13644 +
13645 + if (tsk == current) {
13646 + mm->context.vdso = 0;
13647 +
13648 +#ifdef CONFIG_X86_32
13649 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13650 + mm->context.user_cs_base = 0UL;
13651 + mm->context.user_cs_limit = ~0UL;
13652 +
13653 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13654 + cpus_clear(mm->context.cpu_user_cs_mask);
13655 +#endif
13656 +
13657 +#endif
13658 +#endif
13659 +
13660 + }
13661 +
13662 return retval;
13663 }
13664
13665 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13666 }
13667 }
13668
13669 +#ifdef CONFIG_PAX_SEGMEXEC
13670 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13671 + error = -EINVAL;
13672 + goto out_unlock;
13673 + }
13674 +#endif
13675 +
13676 fill_ldt(&ldt, &ldt_info);
13677 if (oldmode)
13678 ldt.avl = 0;
13679 diff -urNp linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c
13680 --- linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400
13681 +++ linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-08-05 19:44:33.000000000 -0400
13682 @@ -27,7 +27,7 @@
13683 #include <asm/cacheflush.h>
13684 #include <asm/debugreg.h>
13685
13686 -static void set_idt(void *newidt, __u16 limit)
13687 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13688 {
13689 struct desc_ptr curidt;
13690
13691 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13692 }
13693
13694
13695 -static void set_gdt(void *newgdt, __u16 limit)
13696 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13697 {
13698 struct desc_ptr curgdt;
13699
13700 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13701 }
13702
13703 control_page = page_address(image->control_code_page);
13704 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13705 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13706
13707 relocate_kernel_ptr = control_page;
13708 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13709 diff -urNp linux-2.6.39.4/arch/x86/kernel/microcode_intel.c linux-2.6.39.4/arch/x86/kernel/microcode_intel.c
13710 --- linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400
13711 +++ linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-08-05 20:34:06.000000000 -0400
13712 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13713
13714 static int get_ucode_user(void *to, const void *from, size_t n)
13715 {
13716 - return copy_from_user(to, from, n);
13717 + return copy_from_user(to, (__force const void __user *)from, n);
13718 }
13719
13720 static enum ucode_state
13721 request_microcode_user(int cpu, const void __user *buf, size_t size)
13722 {
13723 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13724 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13725 }
13726
13727 static void microcode_fini_cpu(int cpu)
13728 diff -urNp linux-2.6.39.4/arch/x86/kernel/module.c linux-2.6.39.4/arch/x86/kernel/module.c
13729 --- linux-2.6.39.4/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
13730 +++ linux-2.6.39.4/arch/x86/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
13731 @@ -35,21 +35,66 @@
13732 #define DEBUGP(fmt...)
13733 #endif
13734
13735 -void *module_alloc(unsigned long size)
13736 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13737 {
13738 if (PAGE_ALIGN(size) > MODULES_LEN)
13739 return NULL;
13740 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13741 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13742 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13743 -1, __builtin_return_address(0));
13744 }
13745
13746 +void *module_alloc(unsigned long size)
13747 +{
13748 +
13749 +#ifdef CONFIG_PAX_KERNEXEC
13750 + return __module_alloc(size, PAGE_KERNEL);
13751 +#else
13752 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13753 +#endif
13754 +
13755 +}
13756 +
13757 /* Free memory returned from module_alloc */
13758 void module_free(struct module *mod, void *module_region)
13759 {
13760 vfree(module_region);
13761 }
13762
13763 +#ifdef CONFIG_PAX_KERNEXEC
13764 +#ifdef CONFIG_X86_32
13765 +void *module_alloc_exec(unsigned long size)
13766 +{
13767 + struct vm_struct *area;
13768 +
13769 + if (size == 0)
13770 + return NULL;
13771 +
13772 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13773 + return area ? area->addr : NULL;
13774 +}
13775 +EXPORT_SYMBOL(module_alloc_exec);
13776 +
13777 +void module_free_exec(struct module *mod, void *module_region)
13778 +{
13779 + vunmap(module_region);
13780 +}
13781 +EXPORT_SYMBOL(module_free_exec);
13782 +#else
13783 +void module_free_exec(struct module *mod, void *module_region)
13784 +{
13785 + module_free(mod, module_region);
13786 +}
13787 +EXPORT_SYMBOL(module_free_exec);
13788 +
13789 +void *module_alloc_exec(unsigned long size)
13790 +{
13791 + return __module_alloc(size, PAGE_KERNEL_RX);
13792 +}
13793 +EXPORT_SYMBOL(module_alloc_exec);
13794 +#endif
13795 +#endif
13796 +
13797 /* We don't need anything special. */
13798 int module_frob_arch_sections(Elf_Ehdr *hdr,
13799 Elf_Shdr *sechdrs,
13800 @@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13801 unsigned int i;
13802 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13803 Elf32_Sym *sym;
13804 - uint32_t *location;
13805 + uint32_t *plocation, location;
13806
13807 DEBUGP("Applying relocate section %u to %u\n", relsec,
13808 sechdrs[relsec].sh_info);
13809 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13810 /* This is where to make the change */
13811 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13812 - + rel[i].r_offset;
13813 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13814 + location = (uint32_t)plocation;
13815 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13816 + plocation = ktla_ktva((void *)plocation);
13817 /* This is the symbol it is referring to. Note that all
13818 undefined symbols have been resolved. */
13819 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13820 @@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13821 switch (ELF32_R_TYPE(rel[i].r_info)) {
13822 case R_386_32:
13823 /* We add the value into the location given */
13824 - *location += sym->st_value;
13825 + pax_open_kernel();
13826 + *plocation += sym->st_value;
13827 + pax_close_kernel();
13828 break;
13829 case R_386_PC32:
13830 /* Add the value, subtract its postition */
13831 - *location += sym->st_value - (uint32_t)location;
13832 + pax_open_kernel();
13833 + *plocation += sym->st_value - location;
13834 + pax_close_kernel();
13835 break;
13836 default:
13837 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13838 @@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13839 case R_X86_64_NONE:
13840 break;
13841 case R_X86_64_64:
13842 + pax_open_kernel();
13843 *(u64 *)loc = val;
13844 + pax_close_kernel();
13845 break;
13846 case R_X86_64_32:
13847 + pax_open_kernel();
13848 *(u32 *)loc = val;
13849 + pax_close_kernel();
13850 if (val != *(u32 *)loc)
13851 goto overflow;
13852 break;
13853 case R_X86_64_32S:
13854 + pax_open_kernel();
13855 *(s32 *)loc = val;
13856 + pax_close_kernel();
13857 if ((s64)val != *(s32 *)loc)
13858 goto overflow;
13859 break;
13860 case R_X86_64_PC32:
13861 val -= (u64)loc;
13862 + pax_open_kernel();
13863 *(u32 *)loc = val;
13864 + pax_close_kernel();
13865 +
13866 #if 0
13867 if ((s64)val != *(s32 *)loc)
13868 goto overflow;
13869 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt.c linux-2.6.39.4/arch/x86/kernel/paravirt.c
13870 --- linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400
13871 +++ linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-08-05 19:44:33.000000000 -0400
13872 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13873 {
13874 return x;
13875 }
13876 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13877 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13878 +#endif
13879
13880 void __init default_banner(void)
13881 {
13882 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13883 * corresponding structure. */
13884 static void *get_call_destination(u8 type)
13885 {
13886 - struct paravirt_patch_template tmpl = {
13887 + const struct paravirt_patch_template tmpl = {
13888 .pv_init_ops = pv_init_ops,
13889 .pv_time_ops = pv_time_ops,
13890 .pv_cpu_ops = pv_cpu_ops,
13891 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13892 .pv_lock_ops = pv_lock_ops,
13893 #endif
13894 };
13895 +
13896 + pax_track_stack();
13897 +
13898 return *((void **)&tmpl + type);
13899 }
13900
13901 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13902 if (opfunc == NULL)
13903 /* If there's no function, patch it with a ud2a (BUG) */
13904 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13905 - else if (opfunc == _paravirt_nop)
13906 + else if (opfunc == (void *)_paravirt_nop)
13907 /* If the operation is a nop, then nop the callsite */
13908 ret = paravirt_patch_nop();
13909
13910 /* identity functions just return their single argument */
13911 - else if (opfunc == _paravirt_ident_32)
13912 + else if (opfunc == (void *)_paravirt_ident_32)
13913 ret = paravirt_patch_ident_32(insnbuf, len);
13914 - else if (opfunc == _paravirt_ident_64)
13915 + else if (opfunc == (void *)_paravirt_ident_64)
13916 ret = paravirt_patch_ident_64(insnbuf, len);
13917 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13918 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13919 + ret = paravirt_patch_ident_64(insnbuf, len);
13920 +#endif
13921
13922 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13923 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13924 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13925 if (insn_len > len || start == NULL)
13926 insn_len = len;
13927 else
13928 - memcpy(insnbuf, start, insn_len);
13929 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13930
13931 return insn_len;
13932 }
13933 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13934 preempt_enable();
13935 }
13936
13937 -struct pv_info pv_info = {
13938 +struct pv_info pv_info __read_only = {
13939 .name = "bare hardware",
13940 .paravirt_enabled = 0,
13941 .kernel_rpl = 0,
13942 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13943 };
13944
13945 -struct pv_init_ops pv_init_ops = {
13946 +struct pv_init_ops pv_init_ops __read_only = {
13947 .patch = native_patch,
13948 };
13949
13950 -struct pv_time_ops pv_time_ops = {
13951 +struct pv_time_ops pv_time_ops __read_only = {
13952 .sched_clock = native_sched_clock,
13953 };
13954
13955 -struct pv_irq_ops pv_irq_ops = {
13956 +struct pv_irq_ops pv_irq_ops __read_only = {
13957 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13958 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13959 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13960 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13961 #endif
13962 };
13963
13964 -struct pv_cpu_ops pv_cpu_ops = {
13965 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13966 .cpuid = native_cpuid,
13967 .get_debugreg = native_get_debugreg,
13968 .set_debugreg = native_set_debugreg,
13969 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13970 .end_context_switch = paravirt_nop,
13971 };
13972
13973 -struct pv_apic_ops pv_apic_ops = {
13974 +struct pv_apic_ops pv_apic_ops __read_only = {
13975 #ifdef CONFIG_X86_LOCAL_APIC
13976 .startup_ipi_hook = paravirt_nop,
13977 #endif
13978 };
13979
13980 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13981 +#ifdef CONFIG_X86_32
13982 +#ifdef CONFIG_X86_PAE
13983 +/* 64-bit pagetable entries */
13984 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13985 +#else
13986 /* 32-bit pagetable entries */
13987 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13988 +#endif
13989 #else
13990 /* 64-bit pagetable entries */
13991 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13992 #endif
13993
13994 -struct pv_mmu_ops pv_mmu_ops = {
13995 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13996
13997 .read_cr2 = native_read_cr2,
13998 .write_cr2 = native_write_cr2,
13999 @@ -465,6 +480,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14000 },
14001
14002 .set_fixmap = native_set_fixmap,
14003 +
14004 +#ifdef CONFIG_PAX_KERNEXEC
14005 + .pax_open_kernel = native_pax_open_kernel,
14006 + .pax_close_kernel = native_pax_close_kernel,
14007 +#endif
14008 +
14009 };
14010
14011 EXPORT_SYMBOL_GPL(pv_time_ops);
14012 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c
14013 --- linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400
14014 +++ linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-05 19:44:33.000000000 -0400
14015 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14016 arch_spin_lock(lock);
14017 }
14018
14019 -struct pv_lock_ops pv_lock_ops = {
14020 +struct pv_lock_ops pv_lock_ops __read_only = {
14021 #ifdef CONFIG_SMP
14022 .spin_is_locked = __ticket_spin_is_locked,
14023 .spin_is_contended = __ticket_spin_is_contended,
14024 diff -urNp linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c
14025 --- linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400
14026 +++ linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-08-05 19:44:35.000000000 -0400
14027 @@ -2,7 +2,7 @@
14028 #include <asm/iommu_table.h>
14029 #include <linux/string.h>
14030 #include <linux/kallsyms.h>
14031 -
14032 +#include <linux/sched.h>
14033
14034 #define DEBUG 1
14035
14036 @@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i
14037 char sym_p[KSYM_SYMBOL_LEN];
14038 char sym_q[KSYM_SYMBOL_LEN];
14039
14040 + pax_track_stack();
14041 +
14042 /* Simple cyclic dependency checker. */
14043 for (p = start; p < finish; p++) {
14044 q = find_dependents_of(start, finish, p);
14045 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_32.c linux-2.6.39.4/arch/x86/kernel/process_32.c
14046 --- linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-06-25 12:55:22.000000000 -0400
14047 +++ linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-08-05 19:44:35.000000000 -0400
14048 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14049 unsigned long thread_saved_pc(struct task_struct *tsk)
14050 {
14051 return ((unsigned long *)tsk->thread.sp)[3];
14052 +//XXX return tsk->thread.eip;
14053 }
14054
14055 #ifndef CONFIG_SMP
14056 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14057 unsigned long sp;
14058 unsigned short ss, gs;
14059
14060 - if (user_mode_vm(regs)) {
14061 + if (user_mode(regs)) {
14062 sp = regs->sp;
14063 ss = regs->ss & 0xffff;
14064 - gs = get_user_gs(regs);
14065 } else {
14066 sp = kernel_stack_pointer(regs);
14067 savesegment(ss, ss);
14068 - savesegment(gs, gs);
14069 }
14070 + gs = get_user_gs(regs);
14071
14072 show_regs_common();
14073
14074 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14075 struct task_struct *tsk;
14076 int err;
14077
14078 - childregs = task_pt_regs(p);
14079 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14080 *childregs = *regs;
14081 childregs->ax = 0;
14082 childregs->sp = sp;
14083
14084 p->thread.sp = (unsigned long) childregs;
14085 p->thread.sp0 = (unsigned long) (childregs+1);
14086 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14087
14088 p->thread.ip = (unsigned long) ret_from_fork;
14089
14090 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14091 struct thread_struct *prev = &prev_p->thread,
14092 *next = &next_p->thread;
14093 int cpu = smp_processor_id();
14094 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14095 + struct tss_struct *tss = init_tss + cpu;
14096 bool preload_fpu;
14097
14098 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14099 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14100 */
14101 lazy_save_gs(prev->gs);
14102
14103 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14104 + __set_fs(task_thread_info(next_p)->addr_limit);
14105 +#endif
14106 +
14107 /*
14108 * Load the per-thread Thread-Local Storage descriptor.
14109 */
14110 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14111 */
14112 arch_end_context_switch(next_p);
14113
14114 + percpu_write(current_task, next_p);
14115 + percpu_write(current_tinfo, &next_p->tinfo);
14116 +
14117 if (preload_fpu)
14118 __math_state_restore();
14119
14120 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14121 if (prev->gs | next->gs)
14122 lazy_load_gs(next->gs);
14123
14124 - percpu_write(current_task, next_p);
14125 -
14126 return prev_p;
14127 }
14128
14129 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14130 } while (count++ < 16);
14131 return 0;
14132 }
14133 -
14134 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_64.c linux-2.6.39.4/arch/x86/kernel/process_64.c
14135 --- linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-06-25 12:55:22.000000000 -0400
14136 +++ linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-08-05 19:44:35.000000000 -0400
14137 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14138 void exit_idle(void)
14139 {
14140 /* idle loop has pid 0 */
14141 - if (current->pid)
14142 + if (task_pid_nr(current))
14143 return;
14144 __exit_idle();
14145 }
14146 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14147 struct pt_regs *childregs;
14148 struct task_struct *me = current;
14149
14150 - childregs = ((struct pt_regs *)
14151 - (THREAD_SIZE + task_stack_page(p))) - 1;
14152 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14153 *childregs = *regs;
14154
14155 childregs->ax = 0;
14156 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14157 p->thread.sp = (unsigned long) childregs;
14158 p->thread.sp0 = (unsigned long) (childregs+1);
14159 p->thread.usersp = me->thread.usersp;
14160 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14161
14162 set_tsk_thread_flag(p, TIF_FORK);
14163
14164 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14165 struct thread_struct *prev = &prev_p->thread;
14166 struct thread_struct *next = &next_p->thread;
14167 int cpu = smp_processor_id();
14168 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14169 + struct tss_struct *tss = init_tss + cpu;
14170 unsigned fsindex, gsindex;
14171 bool preload_fpu;
14172
14173 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14174 prev->usersp = percpu_read(old_rsp);
14175 percpu_write(old_rsp, next->usersp);
14176 percpu_write(current_task, next_p);
14177 + percpu_write(current_tinfo, &next_p->tinfo);
14178
14179 - percpu_write(kernel_stack,
14180 - (unsigned long)task_stack_page(next_p) +
14181 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14182 + percpu_write(kernel_stack, next->sp0);
14183
14184 /*
14185 * Now maybe reload the debug registers and handle I/O bitmaps
14186 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14187 if (!p || p == current || p->state == TASK_RUNNING)
14188 return 0;
14189 stack = (unsigned long)task_stack_page(p);
14190 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14191 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14192 return 0;
14193 fp = *(u64 *)(p->thread.sp);
14194 do {
14195 - if (fp < (unsigned long)stack ||
14196 - fp >= (unsigned long)stack+THREAD_SIZE)
14197 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14198 return 0;
14199 ip = *(u64 *)(fp+8);
14200 if (!in_sched_functions(ip))
14201 diff -urNp linux-2.6.39.4/arch/x86/kernel/process.c linux-2.6.39.4/arch/x86/kernel/process.c
14202 --- linux-2.6.39.4/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
14203 +++ linux-2.6.39.4/arch/x86/kernel/process.c 2011-08-05 19:44:35.000000000 -0400
14204 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14205
14206 void free_thread_info(struct thread_info *ti)
14207 {
14208 - free_thread_xstate(ti->task);
14209 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14210 }
14211
14212 +static struct kmem_cache *task_struct_cachep;
14213 +
14214 void arch_task_cache_init(void)
14215 {
14216 - task_xstate_cachep =
14217 - kmem_cache_create("task_xstate", xstate_size,
14218 + /* create a slab on which task_structs can be allocated */
14219 + task_struct_cachep =
14220 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14221 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14222 +
14223 + task_xstate_cachep =
14224 + kmem_cache_create("task_xstate", xstate_size,
14225 __alignof__(union thread_xstate),
14226 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14227 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14228 +}
14229 +
14230 +struct task_struct *alloc_task_struct_node(int node)
14231 +{
14232 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14233 +}
14234 +
14235 +void free_task_struct(struct task_struct *task)
14236 +{
14237 + free_thread_xstate(task);
14238 + kmem_cache_free(task_struct_cachep, task);
14239 }
14240
14241 /*
14242 @@ -70,7 +87,7 @@ void exit_thread(void)
14243 unsigned long *bp = t->io_bitmap_ptr;
14244
14245 if (bp) {
14246 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14247 + struct tss_struct *tss = init_tss + get_cpu();
14248
14249 t->io_bitmap_ptr = NULL;
14250 clear_thread_flag(TIF_IO_BITMAP);
14251 @@ -106,7 +123,7 @@ void show_regs_common(void)
14252
14253 printk(KERN_CONT "\n");
14254 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14255 - current->pid, current->comm, print_tainted(),
14256 + task_pid_nr(current), current->comm, print_tainted(),
14257 init_utsname()->release,
14258 (int)strcspn(init_utsname()->version, " "),
14259 init_utsname()->version);
14260 @@ -120,6 +137,9 @@ void flush_thread(void)
14261 {
14262 struct task_struct *tsk = current;
14263
14264 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14265 + loadsegment(gs, 0);
14266 +#endif
14267 flush_ptrace_hw_breakpoint(tsk);
14268 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14269 /*
14270 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14271 regs.di = (unsigned long) arg;
14272
14273 #ifdef CONFIG_X86_32
14274 - regs.ds = __USER_DS;
14275 - regs.es = __USER_DS;
14276 + regs.ds = __KERNEL_DS;
14277 + regs.es = __KERNEL_DS;
14278 regs.fs = __KERNEL_PERCPU;
14279 - regs.gs = __KERNEL_STACK_CANARY;
14280 + savesegment(gs, regs.gs);
14281 #else
14282 regs.ss = __KERNEL_DS;
14283 #endif
14284 @@ -401,7 +421,7 @@ void default_idle(void)
14285 EXPORT_SYMBOL(default_idle);
14286 #endif
14287
14288 -void stop_this_cpu(void *dummy)
14289 +__noreturn void stop_this_cpu(void *dummy)
14290 {
14291 local_irq_disable();
14292 /*
14293 @@ -665,16 +685,34 @@ static int __init idle_setup(char *str)
14294 }
14295 early_param("idle", idle_setup);
14296
14297 -unsigned long arch_align_stack(unsigned long sp)
14298 +#ifdef CONFIG_PAX_RANDKSTACK
14299 +asmlinkage void pax_randomize_kstack(void)
14300 {
14301 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14302 - sp -= get_random_int() % 8192;
14303 - return sp & ~0xf;
14304 -}
14305 + struct thread_struct *thread = &current->thread;
14306 + unsigned long time;
14307
14308 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14309 -{
14310 - unsigned long range_end = mm->brk + 0x02000000;
14311 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14312 -}
14313 + if (!randomize_va_space)
14314 + return;
14315 +
14316 + rdtscl(time);
14317 +
14318 + /* P4 seems to return a 0 LSB, ignore it */
14319 +#ifdef CONFIG_MPENTIUM4
14320 + time &= 0x3EUL;
14321 + time <<= 2;
14322 +#elif defined(CONFIG_X86_64)
14323 + time &= 0xFUL;
14324 + time <<= 4;
14325 +#else
14326 + time &= 0x1FUL;
14327 + time <<= 3;
14328 +#endif
14329 +
14330 + thread->sp0 ^= time;
14331 + load_sp0(init_tss + smp_processor_id(), thread);
14332
14333 +#ifdef CONFIG_X86_64
14334 + percpu_write(kernel_stack, thread->sp0);
14335 +#endif
14336 +}
14337 +#endif
14338 diff -urNp linux-2.6.39.4/arch/x86/kernel/ptrace.c linux-2.6.39.4/arch/x86/kernel/ptrace.c
14339 --- linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
14340 +++ linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-08-05 19:44:35.000000000 -0400
14341 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14342 unsigned long addr, unsigned long data)
14343 {
14344 int ret;
14345 - unsigned long __user *datap = (unsigned long __user *)data;
14346 + unsigned long __user *datap = (__force unsigned long __user *)data;
14347
14348 switch (request) {
14349 /* read the word at location addr in the USER area. */
14350 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14351 if ((int) addr < 0)
14352 return -EIO;
14353 ret = do_get_thread_area(child, addr,
14354 - (struct user_desc __user *)data);
14355 + (__force struct user_desc __user *) data);
14356 break;
14357
14358 case PTRACE_SET_THREAD_AREA:
14359 if ((int) addr < 0)
14360 return -EIO;
14361 ret = do_set_thread_area(child, addr,
14362 - (struct user_desc __user *)data, 0);
14363 + (__force struct user_desc __user *) data, 0);
14364 break;
14365 #endif
14366
14367 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14368 memset(info, 0, sizeof(*info));
14369 info->si_signo = SIGTRAP;
14370 info->si_code = si_code;
14371 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14372 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14373 }
14374
14375 void user_single_step_siginfo(struct task_struct *tsk,
14376 @@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts
14377 * We must return the syscall number to actually look up in the table.
14378 * This can be -1L to skip running any syscall at all.
14379 */
14380 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
14381 +long syscall_trace_enter(struct pt_regs *regs)
14382 {
14383 long ret = 0;
14384
14385 @@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru
14386 return ret ?: regs->orig_ax;
14387 }
14388
14389 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
14390 +void syscall_trace_leave(struct pt_regs *regs)
14391 {
14392 bool step;
14393
14394 diff -urNp linux-2.6.39.4/arch/x86/kernel/pvclock.c linux-2.6.39.4/arch/x86/kernel/pvclock.c
14395 --- linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400
14396 +++ linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-08-05 19:44:35.000000000 -0400
14397 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14398 return pv_tsc_khz;
14399 }
14400
14401 -static atomic64_t last_value = ATOMIC64_INIT(0);
14402 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14403
14404 void pvclock_resume(void)
14405 {
14406 - atomic64_set(&last_value, 0);
14407 + atomic64_set_unchecked(&last_value, 0);
14408 }
14409
14410 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14411 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14412 * updating at the same time, and one of them could be slightly behind,
14413 * making the assumption that last_value always go forward fail to hold.
14414 */
14415 - last = atomic64_read(&last_value);
14416 + last = atomic64_read_unchecked(&last_value);
14417 do {
14418 if (ret < last)
14419 return last;
14420 - last = atomic64_cmpxchg(&last_value, last, ret);
14421 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14422 } while (unlikely(last != ret));
14423
14424 return ret;
14425 diff -urNp linux-2.6.39.4/arch/x86/kernel/reboot.c linux-2.6.39.4/arch/x86/kernel/reboot.c
14426 --- linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:11:51.000000000 -0400
14427 +++ linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:12:20.000000000 -0400
14428 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14429 EXPORT_SYMBOL(pm_power_off);
14430
14431 static const struct desc_ptr no_idt = {};
14432 -static int reboot_mode;
14433 +static unsigned short reboot_mode;
14434 enum reboot_type reboot_type = BOOT_KBD;
14435 int reboot_force;
14436
14437 @@ -307,13 +307,17 @@ core_initcall(reboot_init);
14438 extern const unsigned char machine_real_restart_asm[];
14439 extern const u64 machine_real_restart_gdt[3];
14440
14441 -void machine_real_restart(unsigned int type)
14442 +__noreturn void machine_real_restart(unsigned int type)
14443 {
14444 void *restart_va;
14445 unsigned long restart_pa;
14446 - void (*restart_lowmem)(unsigned int);
14447 + void (* __noreturn restart_lowmem)(unsigned int);
14448 u64 *lowmem_gdt;
14449
14450 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14451 + struct desc_struct *gdt;
14452 +#endif
14453 +
14454 local_irq_disable();
14455
14456 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14457 @@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t
14458 boot)". This seems like a fairly standard thing that gets set by
14459 REBOOT.COM programs, and the previous reset routine did this
14460 too. */
14461 - *((unsigned short *)0x472) = reboot_mode;
14462 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14463
14464 /* Patch the GDT in the low memory trampoline */
14465 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14466
14467 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14468 restart_pa = virt_to_phys(restart_va);
14469 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14470 + restart_lowmem = (void *)restart_pa;
14471
14472 /* GDT[0]: GDT self-pointer */
14473 lowmem_gdt[0] =
14474 @@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t
14475 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14476
14477 /* Jump to the identity-mapped low memory code */
14478 +
14479 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14480 + gdt = get_cpu_gdt_table(smp_processor_id());
14481 + pax_open_kernel();
14482 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14483 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14484 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14485 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14486 +#endif
14487 +#ifdef CONFIG_PAX_KERNEXEC
14488 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14489 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14490 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14491 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14492 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14493 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14494 +#endif
14495 + pax_close_kernel();
14496 +#endif
14497 +
14498 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14499 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14500 + unreachable();
14501 +#else
14502 restart_lowmem(type);
14503 +#endif
14504 +
14505 }
14506 #ifdef CONFIG_APM_MODULE
14507 EXPORT_SYMBOL(machine_real_restart);
14508 @@ -486,7 +516,7 @@ void __attribute__((weak)) mach_reboot_f
14509 {
14510 }
14511
14512 -static void native_machine_emergency_restart(void)
14513 +__noreturn static void native_machine_emergency_restart(void)
14514 {
14515 int i;
14516
14517 @@ -601,13 +631,13 @@ void native_machine_shutdown(void)
14518 #endif
14519 }
14520
14521 -static void __machine_emergency_restart(int emergency)
14522 +static __noreturn void __machine_emergency_restart(int emergency)
14523 {
14524 reboot_emergency = emergency;
14525 machine_ops.emergency_restart();
14526 }
14527
14528 -static void native_machine_restart(char *__unused)
14529 +static __noreturn void native_machine_restart(char *__unused)
14530 {
14531 printk("machine restart\n");
14532
14533 @@ -616,7 +646,7 @@ static void native_machine_restart(char
14534 __machine_emergency_restart(0);
14535 }
14536
14537 -static void native_machine_halt(void)
14538 +static __noreturn void native_machine_halt(void)
14539 {
14540 /* stop other cpus and apics */
14541 machine_shutdown();
14542 @@ -627,7 +657,7 @@ static void native_machine_halt(void)
14543 stop_this_cpu(NULL);
14544 }
14545
14546 -static void native_machine_power_off(void)
14547 +__noreturn static void native_machine_power_off(void)
14548 {
14549 if (pm_power_off) {
14550 if (!reboot_force)
14551 @@ -636,6 +666,7 @@ static void native_machine_power_off(voi
14552 }
14553 /* a fallback in case there is no PM info available */
14554 tboot_shutdown(TB_SHUTDOWN_HALT);
14555 + unreachable();
14556 }
14557
14558 struct machine_ops machine_ops = {
14559 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup.c linux-2.6.39.4/arch/x86/kernel/setup.c
14560 --- linux-2.6.39.4/arch/x86/kernel/setup.c 2011-06-25 12:55:22.000000000 -0400
14561 +++ linux-2.6.39.4/arch/x86/kernel/setup.c 2011-08-05 19:44:35.000000000 -0400
14562 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14563 * area (640->1Mb) as ram even though it is not.
14564 * take them out.
14565 */
14566 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14567 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14568 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14569 }
14570
14571 @@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p)
14572
14573 if (!boot_params.hdr.root_flags)
14574 root_mountflags &= ~MS_RDONLY;
14575 - init_mm.start_code = (unsigned long) _text;
14576 - init_mm.end_code = (unsigned long) _etext;
14577 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14578 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14579 init_mm.end_data = (unsigned long) _edata;
14580 init_mm.brk = _brk_end;
14581
14582 - code_resource.start = virt_to_phys(_text);
14583 - code_resource.end = virt_to_phys(_etext)-1;
14584 - data_resource.start = virt_to_phys(_etext);
14585 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14586 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14587 + data_resource.start = virt_to_phys(_sdata);
14588 data_resource.end = virt_to_phys(_edata)-1;
14589 bss_resource.start = virt_to_phys(&__bss_start);
14590 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14591 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup_percpu.c linux-2.6.39.4/arch/x86/kernel/setup_percpu.c
14592 --- linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400
14593 +++ linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-08-05 19:44:35.000000000 -0400
14594 @@ -21,19 +21,17 @@
14595 #include <asm/cpu.h>
14596 #include <asm/stackprotector.h>
14597
14598 -DEFINE_PER_CPU(int, cpu_number);
14599 +#ifdef CONFIG_SMP
14600 +DEFINE_PER_CPU(unsigned int, cpu_number);
14601 EXPORT_PER_CPU_SYMBOL(cpu_number);
14602 +#endif
14603
14604 -#ifdef CONFIG_X86_64
14605 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14606 -#else
14607 -#define BOOT_PERCPU_OFFSET 0
14608 -#endif
14609
14610 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14611 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14612
14613 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14614 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14615 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14616 };
14617 EXPORT_SYMBOL(__per_cpu_offset);
14618 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14619 {
14620 #ifdef CONFIG_X86_32
14621 struct desc_struct gdt;
14622 + unsigned long base = per_cpu_offset(cpu);
14623
14624 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14625 - 0x2 | DESCTYPE_S, 0x8);
14626 - gdt.s = 1;
14627 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14628 + 0x83 | DESCTYPE_S, 0xC);
14629 write_gdt_entry(get_cpu_gdt_table(cpu),
14630 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14631 #endif
14632 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14633 /* alrighty, percpu areas up and running */
14634 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14635 for_each_possible_cpu(cpu) {
14636 +#ifdef CONFIG_CC_STACKPROTECTOR
14637 +#ifdef CONFIG_X86_32
14638 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14639 +#endif
14640 +#endif
14641 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14642 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14643 per_cpu(cpu_number, cpu) = cpu;
14644 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14645 */
14646 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14647 #endif
14648 +#ifdef CONFIG_CC_STACKPROTECTOR
14649 +#ifdef CONFIG_X86_32
14650 + if (!cpu)
14651 + per_cpu(stack_canary.canary, cpu) = canary;
14652 +#endif
14653 +#endif
14654 /*
14655 * Up to this point, the boot CPU has been using .init.data
14656 * area. Reload any changed state for the boot CPU.
14657 diff -urNp linux-2.6.39.4/arch/x86/kernel/signal.c linux-2.6.39.4/arch/x86/kernel/signal.c
14658 --- linux-2.6.39.4/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
14659 +++ linux-2.6.39.4/arch/x86/kernel/signal.c 2011-08-05 19:44:35.000000000 -0400
14660 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14661 * Align the stack pointer according to the i386 ABI,
14662 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14663 */
14664 - sp = ((sp + 4) & -16ul) - 4;
14665 + sp = ((sp - 12) & -16ul) - 4;
14666 #else /* !CONFIG_X86_32 */
14667 sp = round_down(sp, 16) - 8;
14668 #endif
14669 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14670 * Return an always-bogus address instead so we will die with SIGSEGV.
14671 */
14672 if (onsigstack && !likely(on_sig_stack(sp)))
14673 - return (void __user *)-1L;
14674 + return (__force void __user *)-1L;
14675
14676 /* save i387 state */
14677 if (used_math() && save_i387_xstate(*fpstate) < 0)
14678 - return (void __user *)-1L;
14679 + return (__force void __user *)-1L;
14680
14681 return (void __user *)sp;
14682 }
14683 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14684 }
14685
14686 if (current->mm->context.vdso)
14687 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14688 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14689 else
14690 - restorer = &frame->retcode;
14691 + restorer = (void __user *)&frame->retcode;
14692 if (ka->sa.sa_flags & SA_RESTORER)
14693 restorer = ka->sa.sa_restorer;
14694
14695 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14696 * reasons and because gdb uses it as a signature to notice
14697 * signal handler stack frames.
14698 */
14699 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14700 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14701
14702 if (err)
14703 return -EFAULT;
14704 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14705 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14706
14707 /* Set up to return from userspace. */
14708 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14709 + if (current->mm->context.vdso)
14710 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14711 + else
14712 + restorer = (void __user *)&frame->retcode;
14713 if (ka->sa.sa_flags & SA_RESTORER)
14714 restorer = ka->sa.sa_restorer;
14715 put_user_ex(restorer, &frame->pretcode);
14716 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14717 * reasons and because gdb uses it as a signature to notice
14718 * signal handler stack frames.
14719 */
14720 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14721 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14722 } put_user_catch(err);
14723
14724 if (err)
14725 @@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re
14726 int signr;
14727 sigset_t *oldset;
14728
14729 + pax_track_stack();
14730 +
14731 /*
14732 * We want the common case to go fast, which is why we may in certain
14733 * cases get here from kernel mode. Just return without doing anything
14734 @@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re
14735 * X86_32: vm86 regs switched out by assembly code before reaching
14736 * here, so testing against kernel CS suffices.
14737 */
14738 - if (!user_mode(regs))
14739 + if (!user_mode_novm(regs))
14740 return;
14741
14742 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14743 diff -urNp linux-2.6.39.4/arch/x86/kernel/smpboot.c linux-2.6.39.4/arch/x86/kernel/smpboot.c
14744 --- linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-06-25 12:55:22.000000000 -0400
14745 +++ linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-08-05 19:44:35.000000000 -0400
14746 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14747 set_idle_for_cpu(cpu, c_idle.idle);
14748 do_rest:
14749 per_cpu(current_task, cpu) = c_idle.idle;
14750 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14751 #ifdef CONFIG_X86_32
14752 /* Stack for startup_32 can be just as for start_secondary onwards */
14753 irq_ctx_init(cpu);
14754 #else
14755 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14756 initial_gs = per_cpu_offset(cpu);
14757 - per_cpu(kernel_stack, cpu) =
14758 - (unsigned long)task_stack_page(c_idle.idle) -
14759 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14760 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14761 #endif
14762 +
14763 + pax_open_kernel();
14764 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14765 + pax_close_kernel();
14766 +
14767 initial_code = (unsigned long)start_secondary;
14768 stack_start = c_idle.idle->thread.sp;
14769
14770 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14771
14772 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14773
14774 +#ifdef CONFIG_PAX_PER_CPU_PGD
14775 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14776 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14777 + KERNEL_PGD_PTRS);
14778 +#endif
14779 +
14780 err = do_boot_cpu(apicid, cpu);
14781 if (err) {
14782 pr_debug("do_boot_cpu failed %d\n", err);
14783 diff -urNp linux-2.6.39.4/arch/x86/kernel/step.c linux-2.6.39.4/arch/x86/kernel/step.c
14784 --- linux-2.6.39.4/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400
14785 +++ linux-2.6.39.4/arch/x86/kernel/step.c 2011-08-05 19:44:35.000000000 -0400
14786 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14787 struct desc_struct *desc;
14788 unsigned long base;
14789
14790 - seg &= ~7UL;
14791 + seg >>= 3;
14792
14793 mutex_lock(&child->mm->context.lock);
14794 - if (unlikely((seg >> 3) >= child->mm->context.size))
14795 + if (unlikely(seg >= child->mm->context.size))
14796 addr = -1L; /* bogus selector, access would fault */
14797 else {
14798 desc = child->mm->context.ldt + seg;
14799 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14800 addr += base;
14801 }
14802 mutex_unlock(&child->mm->context.lock);
14803 - }
14804 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14805 + addr = ktla_ktva(addr);
14806
14807 return addr;
14808 }
14809 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14810 unsigned char opcode[15];
14811 unsigned long addr = convert_ip_to_linear(child, regs);
14812
14813 + if (addr == -EINVAL)
14814 + return 0;
14815 +
14816 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14817 for (i = 0; i < copied; i++) {
14818 switch (opcode[i]) {
14819 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14820
14821 #ifdef CONFIG_X86_64
14822 case 0x40 ... 0x4f:
14823 - if (regs->cs != __USER_CS)
14824 + if ((regs->cs & 0xffff) != __USER_CS)
14825 /* 32-bit mode: register increment */
14826 return 0;
14827 /* 64-bit mode: REX prefix */
14828 diff -urNp linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S
14829 --- linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400
14830 +++ linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-08-05 19:44:35.000000000 -0400
14831 @@ -1,3 +1,4 @@
14832 +.section .rodata,"a",@progbits
14833 ENTRY(sys_call_table)
14834 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14835 .long sys_exit
14836 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c
14837 --- linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400
14838 +++ linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-08-05 19:44:35.000000000 -0400
14839 @@ -24,17 +24,224 @@
14840
14841 #include <asm/syscalls.h>
14842
14843 -/*
14844 - * Do a system call from kernel instead of calling sys_execve so we
14845 - * end up with proper pt_regs.
14846 - */
14847 -int kernel_execve(const char *filename,
14848 - const char *const argv[],
14849 - const char *const envp[])
14850 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14851 {
14852 - long __res;
14853 - asm volatile ("int $0x80"
14854 - : "=a" (__res)
14855 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14856 - return __res;
14857 + unsigned long pax_task_size = TASK_SIZE;
14858 +
14859 +#ifdef CONFIG_PAX_SEGMEXEC
14860 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14861 + pax_task_size = SEGMEXEC_TASK_SIZE;
14862 +#endif
14863 +
14864 + if (len > pax_task_size || addr > pax_task_size - len)
14865 + return -EINVAL;
14866 +
14867 + return 0;
14868 +}
14869 +
14870 +unsigned long
14871 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14872 + unsigned long len, unsigned long pgoff, unsigned long flags)
14873 +{
14874 + struct mm_struct *mm = current->mm;
14875 + struct vm_area_struct *vma;
14876 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14877 +
14878 +#ifdef CONFIG_PAX_SEGMEXEC
14879 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14880 + pax_task_size = SEGMEXEC_TASK_SIZE;
14881 +#endif
14882 +
14883 + pax_task_size -= PAGE_SIZE;
14884 +
14885 + if (len > pax_task_size)
14886 + return -ENOMEM;
14887 +
14888 + if (flags & MAP_FIXED)
14889 + return addr;
14890 +
14891 +#ifdef CONFIG_PAX_RANDMMAP
14892 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14893 +#endif
14894 +
14895 + if (addr) {
14896 + addr = PAGE_ALIGN(addr);
14897 + if (pax_task_size - len >= addr) {
14898 + vma = find_vma(mm, addr);
14899 + if (check_heap_stack_gap(vma, addr, len))
14900 + return addr;
14901 + }
14902 + }
14903 + if (len > mm->cached_hole_size) {
14904 + start_addr = addr = mm->free_area_cache;
14905 + } else {
14906 + start_addr = addr = mm->mmap_base;
14907 + mm->cached_hole_size = 0;
14908 + }
14909 +
14910 +#ifdef CONFIG_PAX_PAGEEXEC
14911 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14912 + start_addr = 0x00110000UL;
14913 +
14914 +#ifdef CONFIG_PAX_RANDMMAP
14915 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14916 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14917 +#endif
14918 +
14919 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14920 + start_addr = addr = mm->mmap_base;
14921 + else
14922 + addr = start_addr;
14923 + }
14924 +#endif
14925 +
14926 +full_search:
14927 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14928 + /* At this point: (!vma || addr < vma->vm_end). */
14929 + if (pax_task_size - len < addr) {
14930 + /*
14931 + * Start a new search - just in case we missed
14932 + * some holes.
14933 + */
14934 + if (start_addr != mm->mmap_base) {
14935 + start_addr = addr = mm->mmap_base;
14936 + mm->cached_hole_size = 0;
14937 + goto full_search;
14938 + }
14939 + return -ENOMEM;
14940 + }
14941 + if (check_heap_stack_gap(vma, addr, len))
14942 + break;
14943 + if (addr + mm->cached_hole_size < vma->vm_start)
14944 + mm->cached_hole_size = vma->vm_start - addr;
14945 + addr = vma->vm_end;
14946 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14947 + start_addr = addr = mm->mmap_base;
14948 + mm->cached_hole_size = 0;
14949 + goto full_search;
14950 + }
14951 + }
14952 +
14953 + /*
14954 + * Remember the place where we stopped the search:
14955 + */
14956 + mm->free_area_cache = addr + len;
14957 + return addr;
14958 +}
14959 +
14960 +unsigned long
14961 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14962 + const unsigned long len, const unsigned long pgoff,
14963 + const unsigned long flags)
14964 +{
14965 + struct vm_area_struct *vma;
14966 + struct mm_struct *mm = current->mm;
14967 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14968 +
14969 +#ifdef CONFIG_PAX_SEGMEXEC
14970 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14971 + pax_task_size = SEGMEXEC_TASK_SIZE;
14972 +#endif
14973 +
14974 + pax_task_size -= PAGE_SIZE;
14975 +
14976 + /* requested length too big for entire address space */
14977 + if (len > pax_task_size)
14978 + return -ENOMEM;
14979 +
14980 + if (flags & MAP_FIXED)
14981 + return addr;
14982 +
14983 +#ifdef CONFIG_PAX_PAGEEXEC
14984 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14985 + goto bottomup;
14986 +#endif
14987 +
14988 +#ifdef CONFIG_PAX_RANDMMAP
14989 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14990 +#endif
14991 +
14992 + /* requesting a specific address */
14993 + if (addr) {
14994 + addr = PAGE_ALIGN(addr);
14995 + if (pax_task_size - len >= addr) {
14996 + vma = find_vma(mm, addr);
14997 + if (check_heap_stack_gap(vma, addr, len))
14998 + return addr;
14999 + }
15000 + }
15001 +
15002 + /* check if free_area_cache is useful for us */
15003 + if (len <= mm->cached_hole_size) {
15004 + mm->cached_hole_size = 0;
15005 + mm->free_area_cache = mm->mmap_base;
15006 + }
15007 +
15008 + /* either no address requested or can't fit in requested address hole */
15009 + addr = mm->free_area_cache;
15010 +
15011 + /* make sure it can fit in the remaining address space */
15012 + if (addr > len) {
15013 + vma = find_vma(mm, addr-len);
15014 + if (check_heap_stack_gap(vma, addr - len, len))
15015 + /* remember the address as a hint for next time */
15016 + return (mm->free_area_cache = addr-len);
15017 + }
15018 +
15019 + if (mm->mmap_base < len)
15020 + goto bottomup;
15021 +
15022 + addr = mm->mmap_base-len;
15023 +
15024 + do {
15025 + /*
15026 + * Lookup failure means no vma is above this address,
15027 + * else if new region fits below vma->vm_start,
15028 + * return with success:
15029 + */
15030 + vma = find_vma(mm, addr);
15031 + if (check_heap_stack_gap(vma, addr, len))
15032 + /* remember the address as a hint for next time */
15033 + return (mm->free_area_cache = addr);
15034 +
15035 + /* remember the largest hole we saw so far */
15036 + if (addr + mm->cached_hole_size < vma->vm_start)
15037 + mm->cached_hole_size = vma->vm_start - addr;
15038 +
15039 + /* try just below the current vma->vm_start */
15040 + addr = skip_heap_stack_gap(vma, len);
15041 + } while (!IS_ERR_VALUE(addr));
15042 +
15043 +bottomup:
15044 + /*
15045 + * A failed mmap() very likely causes application failure,
15046 + * so fall back to the bottom-up function here. This scenario
15047 + * can happen with large stack limits and large mmap()
15048 + * allocations.
15049 + */
15050 +
15051 +#ifdef CONFIG_PAX_SEGMEXEC
15052 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15053 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15054 + else
15055 +#endif
15056 +
15057 + mm->mmap_base = TASK_UNMAPPED_BASE;
15058 +
15059 +#ifdef CONFIG_PAX_RANDMMAP
15060 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15061 + mm->mmap_base += mm->delta_mmap;
15062 +#endif
15063 +
15064 + mm->free_area_cache = mm->mmap_base;
15065 + mm->cached_hole_size = ~0UL;
15066 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15067 + /*
15068 + * Restore the topdown base:
15069 + */
15070 + mm->mmap_base = base;
15071 + mm->free_area_cache = base;
15072 + mm->cached_hole_size = ~0UL;
15073 +
15074 + return addr;
15075 }
15076 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c
15077 --- linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400
15078 +++ linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-08-05 19:44:35.000000000 -0400
15079 @@ -32,8 +32,8 @@ out:
15080 return error;
15081 }
15082
15083 -static void find_start_end(unsigned long flags, unsigned long *begin,
15084 - unsigned long *end)
15085 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15086 + unsigned long *begin, unsigned long *end)
15087 {
15088 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15089 unsigned long new_begin;
15090 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15091 *begin = new_begin;
15092 }
15093 } else {
15094 - *begin = TASK_UNMAPPED_BASE;
15095 + *begin = mm->mmap_base;
15096 *end = TASK_SIZE;
15097 }
15098 }
15099 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15100 if (flags & MAP_FIXED)
15101 return addr;
15102
15103 - find_start_end(flags, &begin, &end);
15104 + find_start_end(mm, flags, &begin, &end);
15105
15106 if (len > end)
15107 return -ENOMEM;
15108
15109 +#ifdef CONFIG_PAX_RANDMMAP
15110 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15111 +#endif
15112 +
15113 if (addr) {
15114 addr = PAGE_ALIGN(addr);
15115 vma = find_vma(mm, addr);
15116 - if (end - len >= addr &&
15117 - (!vma || addr + len <= vma->vm_start))
15118 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15119 return addr;
15120 }
15121 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15122 @@ -106,7 +109,7 @@ full_search:
15123 }
15124 return -ENOMEM;
15125 }
15126 - if (!vma || addr + len <= vma->vm_start) {
15127 + if (check_heap_stack_gap(vma, addr, len)) {
15128 /*
15129 * Remember the place where we stopped the search:
15130 */
15131 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15132 {
15133 struct vm_area_struct *vma;
15134 struct mm_struct *mm = current->mm;
15135 - unsigned long addr = addr0;
15136 + unsigned long base = mm->mmap_base, addr = addr0;
15137
15138 /* requested length too big for entire address space */
15139 if (len > TASK_SIZE)
15140 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15141 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15142 goto bottomup;
15143
15144 +#ifdef CONFIG_PAX_RANDMMAP
15145 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15146 +#endif
15147 +
15148 /* requesting a specific address */
15149 if (addr) {
15150 addr = PAGE_ALIGN(addr);
15151 - vma = find_vma(mm, addr);
15152 - if (TASK_SIZE - len >= addr &&
15153 - (!vma || addr + len <= vma->vm_start))
15154 - return addr;
15155 + if (TASK_SIZE - len >= addr) {
15156 + vma = find_vma(mm, addr);
15157 + if (check_heap_stack_gap(vma, addr, len))
15158 + return addr;
15159 + }
15160 }
15161
15162 /* check if free_area_cache is useful for us */
15163 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15164 /* make sure it can fit in the remaining address space */
15165 if (addr > len) {
15166 vma = find_vma(mm, addr-len);
15167 - if (!vma || addr <= vma->vm_start)
15168 + if (check_heap_stack_gap(vma, addr - len, len))
15169 /* remember the address as a hint for next time */
15170 return mm->free_area_cache = addr-len;
15171 }
15172 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15173 * return with success:
15174 */
15175 vma = find_vma(mm, addr);
15176 - if (!vma || addr+len <= vma->vm_start)
15177 + if (check_heap_stack_gap(vma, addr, len))
15178 /* remember the address as a hint for next time */
15179 return mm->free_area_cache = addr;
15180
15181 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15182 mm->cached_hole_size = vma->vm_start - addr;
15183
15184 /* try just below the current vma->vm_start */
15185 - addr = vma->vm_start-len;
15186 - } while (len < vma->vm_start);
15187 + addr = skip_heap_stack_gap(vma, len);
15188 + } while (!IS_ERR_VALUE(addr));
15189
15190 bottomup:
15191 /*
15192 @@ -198,13 +206,21 @@ bottomup:
15193 * can happen with large stack limits and large mmap()
15194 * allocations.
15195 */
15196 + mm->mmap_base = TASK_UNMAPPED_BASE;
15197 +
15198 +#ifdef CONFIG_PAX_RANDMMAP
15199 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15200 + mm->mmap_base += mm->delta_mmap;
15201 +#endif
15202 +
15203 + mm->free_area_cache = mm->mmap_base;
15204 mm->cached_hole_size = ~0UL;
15205 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15206 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15207 /*
15208 * Restore the topdown base:
15209 */
15210 - mm->free_area_cache = mm->mmap_base;
15211 + mm->mmap_base = base;
15212 + mm->free_area_cache = base;
15213 mm->cached_hole_size = ~0UL;
15214
15215 return addr;
15216 diff -urNp linux-2.6.39.4/arch/x86/kernel/tboot.c linux-2.6.39.4/arch/x86/kernel/tboot.c
15217 --- linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400
15218 +++ linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-08-05 19:44:35.000000000 -0400
15219 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
15220
15221 void tboot_shutdown(u32 shutdown_type)
15222 {
15223 - void (*shutdown)(void);
15224 + void (* __noreturn shutdown)(void);
15225
15226 if (!tboot_enabled())
15227 return;
15228 @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
15229
15230 switch_to_tboot_pt();
15231
15232 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15233 + shutdown = (void *)tboot->shutdown_entry;
15234 shutdown();
15235
15236 /* should not reach here */
15237 @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15238 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15239 }
15240
15241 -static atomic_t ap_wfs_count;
15242 +static atomic_unchecked_t ap_wfs_count;
15243
15244 static int tboot_wait_for_aps(int num_aps)
15245 {
15246 @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
15247 {
15248 switch (action) {
15249 case CPU_DYING:
15250 - atomic_inc(&ap_wfs_count);
15251 + atomic_inc_unchecked(&ap_wfs_count);
15252 if (num_online_cpus() == 1)
15253 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15254 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15255 return NOTIFY_BAD;
15256 break;
15257 }
15258 @@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
15259
15260 tboot_create_trampoline();
15261
15262 - atomic_set(&ap_wfs_count, 0);
15263 + atomic_set_unchecked(&ap_wfs_count, 0);
15264 register_hotcpu_notifier(&tboot_cpu_notifier);
15265 return 0;
15266 }
15267 diff -urNp linux-2.6.39.4/arch/x86/kernel/time.c linux-2.6.39.4/arch/x86/kernel/time.c
15268 --- linux-2.6.39.4/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
15269 +++ linux-2.6.39.4/arch/x86/kernel/time.c 2011-08-05 19:44:35.000000000 -0400
15270 @@ -22,17 +22,13 @@
15271 #include <asm/hpet.h>
15272 #include <asm/time.h>
15273
15274 -#ifdef CONFIG_X86_64
15275 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
15276 -#endif
15277 -
15278 unsigned long profile_pc(struct pt_regs *regs)
15279 {
15280 unsigned long pc = instruction_pointer(regs);
15281
15282 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15283 + if (!user_mode(regs) && in_lock_functions(pc)) {
15284 #ifdef CONFIG_FRAME_POINTER
15285 - return *(unsigned long *)(regs->bp + sizeof(long));
15286 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15287 #else
15288 unsigned long *sp =
15289 (unsigned long *)kernel_stack_pointer(regs);
15290 @@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs
15291 * or above a saved flags. Eflags has bits 22-31 zero,
15292 * kernel addresses don't.
15293 */
15294 +
15295 +#ifdef CONFIG_PAX_KERNEXEC
15296 + return ktla_ktva(sp[0]);
15297 +#else
15298 if (sp[0] >> 22)
15299 return sp[0];
15300 if (sp[1] >> 22)
15301 return sp[1];
15302 #endif
15303 +
15304 +#endif
15305 }
15306 return pc;
15307 }
15308 diff -urNp linux-2.6.39.4/arch/x86/kernel/tls.c linux-2.6.39.4/arch/x86/kernel/tls.c
15309 --- linux-2.6.39.4/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400
15310 +++ linux-2.6.39.4/arch/x86/kernel/tls.c 2011-08-05 19:44:35.000000000 -0400
15311 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15312 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15313 return -EINVAL;
15314
15315 +#ifdef CONFIG_PAX_SEGMEXEC
15316 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15317 + return -EINVAL;
15318 +#endif
15319 +
15320 set_tls_desc(p, idx, &info, 1);
15321
15322 return 0;
15323 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_32.S linux-2.6.39.4/arch/x86/kernel/trampoline_32.S
15324 --- linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400
15325 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-08-05 19:44:35.000000000 -0400
15326 @@ -32,6 +32,12 @@
15327 #include <asm/segment.h>
15328 #include <asm/page_types.h>
15329
15330 +#ifdef CONFIG_PAX_KERNEXEC
15331 +#define ta(X) (X)
15332 +#else
15333 +#define ta(X) ((X) - __PAGE_OFFSET)
15334 +#endif
15335 +
15336 #ifdef CONFIG_SMP
15337
15338 .section ".x86_trampoline","a"
15339 @@ -62,7 +68,7 @@ r_base = .
15340 inc %ax # protected mode (PE) bit
15341 lmsw %ax # into protected mode
15342 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15343 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15344 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15345
15346 # These need to be in the same 64K segment as the above;
15347 # hence we don't use the boot_gdt_descr defined in head.S
15348 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_64.S linux-2.6.39.4/arch/x86/kernel/trampoline_64.S
15349 --- linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400
15350 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-08-05 19:44:35.000000000 -0400
15351 @@ -90,7 +90,7 @@ startup_32:
15352 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15353 movl %eax, %ds
15354
15355 - movl $X86_CR4_PAE, %eax
15356 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15357 movl %eax, %cr4 # Enable PAE mode
15358
15359 # Setup trampoline 4 level pagetables
15360 @@ -138,7 +138,7 @@ tidt:
15361 # so the kernel can live anywhere
15362 .balign 4
15363 tgdt:
15364 - .short tgdt_end - tgdt # gdt limit
15365 + .short tgdt_end - tgdt - 1 # gdt limit
15366 .long tgdt - r_base
15367 .short 0
15368 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15369 diff -urNp linux-2.6.39.4/arch/x86/kernel/traps.c linux-2.6.39.4/arch/x86/kernel/traps.c
15370 --- linux-2.6.39.4/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
15371 +++ linux-2.6.39.4/arch/x86/kernel/traps.c 2011-08-05 19:44:35.000000000 -0400
15372 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15373
15374 /* Do we ignore FPU interrupts ? */
15375 char ignore_fpu_irq;
15376 -
15377 -/*
15378 - * The IDT has to be page-aligned to simplify the Pentium
15379 - * F0 0F bug workaround.
15380 - */
15381 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15382 #endif
15383
15384 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15385 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15386 }
15387
15388 static void __kprobes
15389 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15390 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15391 long error_code, siginfo_t *info)
15392 {
15393 struct task_struct *tsk = current;
15394
15395 #ifdef CONFIG_X86_32
15396 - if (regs->flags & X86_VM_MASK) {
15397 + if (v8086_mode(regs)) {
15398 /*
15399 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15400 * On nmi (interrupt 2), do_trap should not be called.
15401 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15402 }
15403 #endif
15404
15405 - if (!user_mode(regs))
15406 + if (!user_mode_novm(regs))
15407 goto kernel_trap;
15408
15409 #ifdef CONFIG_X86_32
15410 @@ -157,7 +151,7 @@ trap_signal:
15411 printk_ratelimit()) {
15412 printk(KERN_INFO
15413 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15414 - tsk->comm, tsk->pid, str,
15415 + tsk->comm, task_pid_nr(tsk), str,
15416 regs->ip, regs->sp, error_code);
15417 print_vma_addr(" in ", regs->ip);
15418 printk("\n");
15419 @@ -174,8 +168,20 @@ kernel_trap:
15420 if (!fixup_exception(regs)) {
15421 tsk->thread.error_code = error_code;
15422 tsk->thread.trap_no = trapnr;
15423 +
15424 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15425 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15426 + str = "PAX: suspicious stack segment fault";
15427 +#endif
15428 +
15429 die(str, regs, error_code);
15430 }
15431 +
15432 +#ifdef CONFIG_PAX_REFCOUNT
15433 + if (trapnr == 4)
15434 + pax_report_refcount_overflow(regs);
15435 +#endif
15436 +
15437 return;
15438
15439 #ifdef CONFIG_X86_32
15440 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15441 conditional_sti(regs);
15442
15443 #ifdef CONFIG_X86_32
15444 - if (regs->flags & X86_VM_MASK)
15445 + if (v8086_mode(regs))
15446 goto gp_in_vm86;
15447 #endif
15448
15449 tsk = current;
15450 - if (!user_mode(regs))
15451 + if (!user_mode_novm(regs))
15452 goto gp_in_kernel;
15453
15454 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15455 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15456 + struct mm_struct *mm = tsk->mm;
15457 + unsigned long limit;
15458 +
15459 + down_write(&mm->mmap_sem);
15460 + limit = mm->context.user_cs_limit;
15461 + if (limit < TASK_SIZE) {
15462 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15463 + up_write(&mm->mmap_sem);
15464 + return;
15465 + }
15466 + up_write(&mm->mmap_sem);
15467 + }
15468 +#endif
15469 +
15470 tsk->thread.error_code = error_code;
15471 tsk->thread.trap_no = 13;
15472
15473 @@ -304,6 +326,13 @@ gp_in_kernel:
15474 if (notify_die(DIE_GPF, "general protection fault", regs,
15475 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15476 return;
15477 +
15478 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15479 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15480 + die("PAX: suspicious general protection fault", regs, error_code);
15481 + else
15482 +#endif
15483 +
15484 die("general protection fault", regs, error_code);
15485 }
15486
15487 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15488 dotraplinkage notrace __kprobes void
15489 do_nmi(struct pt_regs *regs, long error_code)
15490 {
15491 +
15492 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15493 + if (!user_mode(regs)) {
15494 + unsigned long cs = regs->cs & 0xFFFF;
15495 + unsigned long ip = ktva_ktla(regs->ip);
15496 +
15497 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15498 + regs->ip = ip;
15499 + }
15500 +#endif
15501 +
15502 nmi_enter();
15503
15504 inc_irq_stat(__nmi_count);
15505 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15506 /* It's safe to allow irq's after DR6 has been saved */
15507 preempt_conditional_sti(regs);
15508
15509 - if (regs->flags & X86_VM_MASK) {
15510 + if (v8086_mode(regs)) {
15511 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15512 error_code, 1);
15513 preempt_conditional_cli(regs);
15514 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15515 * We already checked v86 mode above, so we can check for kernel mode
15516 * by just checking the CPL of CS.
15517 */
15518 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15519 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15520 tsk->thread.debugreg6 &= ~DR_STEP;
15521 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15522 regs->flags &= ~X86_EFLAGS_TF;
15523 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15524 return;
15525 conditional_sti(regs);
15526
15527 - if (!user_mode_vm(regs))
15528 + if (!user_mode(regs))
15529 {
15530 if (!fixup_exception(regs)) {
15531 task->thread.error_code = error_code;
15532 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15533 void __math_state_restore(void)
15534 {
15535 struct thread_info *thread = current_thread_info();
15536 - struct task_struct *tsk = thread->task;
15537 + struct task_struct *tsk = current;
15538
15539 /*
15540 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15541 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15542 */
15543 asmlinkage void math_state_restore(void)
15544 {
15545 - struct thread_info *thread = current_thread_info();
15546 - struct task_struct *tsk = thread->task;
15547 + struct task_struct *tsk = current;
15548
15549 if (!tsk_used_math(tsk)) {
15550 local_irq_enable();
15551 diff -urNp linux-2.6.39.4/arch/x86/kernel/verify_cpu.S linux-2.6.39.4/arch/x86/kernel/verify_cpu.S
15552 --- linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-05-19 00:06:34.000000000 -0400
15553 +++ linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-08-05 19:44:35.000000000 -0400
15554 @@ -20,6 +20,7 @@
15555 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15556 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15557 * arch/x86/kernel/head_32.S: processor startup
15558 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15559 *
15560 * verify_cpu, returns the status of longmode and SSE in register %eax.
15561 * 0: Success 1: Failure
15562 diff -urNp linux-2.6.39.4/arch/x86/kernel/vm86_32.c linux-2.6.39.4/arch/x86/kernel/vm86_32.c
15563 --- linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400
15564 +++ linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-08-05 19:44:35.000000000 -0400
15565 @@ -41,6 +41,7 @@
15566 #include <linux/ptrace.h>
15567 #include <linux/audit.h>
15568 #include <linux/stddef.h>
15569 +#include <linux/grsecurity.h>
15570
15571 #include <asm/uaccess.h>
15572 #include <asm/io.h>
15573 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15574 do_exit(SIGSEGV);
15575 }
15576
15577 - tss = &per_cpu(init_tss, get_cpu());
15578 + tss = init_tss + get_cpu();
15579 current->thread.sp0 = current->thread.saved_sp0;
15580 current->thread.sysenter_cs = __KERNEL_CS;
15581 load_sp0(tss, &current->thread);
15582 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15583 struct task_struct *tsk;
15584 int tmp, ret = -EPERM;
15585
15586 +#ifdef CONFIG_GRKERNSEC_VM86
15587 + if (!capable(CAP_SYS_RAWIO)) {
15588 + gr_handle_vm86();
15589 + goto out;
15590 + }
15591 +#endif
15592 +
15593 tsk = current;
15594 if (tsk->thread.saved_sp0)
15595 goto out;
15596 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15597 int tmp, ret;
15598 struct vm86plus_struct __user *v86;
15599
15600 +#ifdef CONFIG_GRKERNSEC_VM86
15601 + if (!capable(CAP_SYS_RAWIO)) {
15602 + gr_handle_vm86();
15603 + ret = -EPERM;
15604 + goto out;
15605 + }
15606 +#endif
15607 +
15608 tsk = current;
15609 switch (cmd) {
15610 case VM86_REQUEST_IRQ:
15611 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15612 tsk->thread.saved_fs = info->regs32->fs;
15613 tsk->thread.saved_gs = get_user_gs(info->regs32);
15614
15615 - tss = &per_cpu(init_tss, get_cpu());
15616 + tss = init_tss + get_cpu();
15617 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15618 if (cpu_has_sep)
15619 tsk->thread.sysenter_cs = 0;
15620 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15621 goto cannot_handle;
15622 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15623 goto cannot_handle;
15624 - intr_ptr = (unsigned long __user *) (i << 2);
15625 + intr_ptr = (__force unsigned long __user *) (i << 2);
15626 if (get_user(segoffs, intr_ptr))
15627 goto cannot_handle;
15628 if ((segoffs >> 16) == BIOSSEG)
15629 diff -urNp linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S
15630 --- linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
15631 +++ linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-08-05 19:44:35.000000000 -0400
15632 @@ -26,6 +26,13 @@
15633 #include <asm/page_types.h>
15634 #include <asm/cache.h>
15635 #include <asm/boot.h>
15636 +#include <asm/segment.h>
15637 +
15638 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15639 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15640 +#else
15641 +#define __KERNEL_TEXT_OFFSET 0
15642 +#endif
15643
15644 #undef i386 /* in case the preprocessor is a 32bit one */
15645
15646 @@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
15647 #ifdef CONFIG_X86_32
15648 OUTPUT_ARCH(i386)
15649 ENTRY(phys_startup_32)
15650 -jiffies = jiffies_64;
15651 #else
15652 OUTPUT_ARCH(i386:x86-64)
15653 ENTRY(phys_startup_64)
15654 -jiffies_64 = jiffies;
15655 #endif
15656
15657 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
15658 @@ -69,31 +74,46 @@ jiffies_64 = jiffies;
15659
15660 PHDRS {
15661 text PT_LOAD FLAGS(5); /* R_E */
15662 +#ifdef CONFIG_X86_32
15663 + module PT_LOAD FLAGS(5); /* R_E */
15664 +#endif
15665 +#ifdef CONFIG_XEN
15666 + rodata PT_LOAD FLAGS(5); /* R_E */
15667 +#else
15668 + rodata PT_LOAD FLAGS(4); /* R__ */
15669 +#endif
15670 data PT_LOAD FLAGS(6); /* RW_ */
15671 #ifdef CONFIG_X86_64
15672 user PT_LOAD FLAGS(5); /* R_E */
15673 +#endif
15674 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15675 #ifdef CONFIG_SMP
15676 percpu PT_LOAD FLAGS(6); /* RW_ */
15677 #endif
15678 + text.init PT_LOAD FLAGS(5); /* R_E */
15679 + text.exit PT_LOAD FLAGS(5); /* R_E */
15680 init PT_LOAD FLAGS(7); /* RWE */
15681 -#endif
15682 note PT_NOTE FLAGS(0); /* ___ */
15683 }
15684
15685 SECTIONS
15686 {
15687 #ifdef CONFIG_X86_32
15688 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15689 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15690 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15691 #else
15692 - . = __START_KERNEL;
15693 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15694 + . = __START_KERNEL;
15695 #endif
15696
15697 /* Text and read-only data */
15698 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15699 - _text = .;
15700 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15701 /* bootstrapping code */
15702 +#ifdef CONFIG_X86_32
15703 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15704 +#else
15705 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15706 +#endif
15707 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15708 + _text = .;
15709 HEAD_TEXT
15710 #ifdef CONFIG_X86_32
15711 . = ALIGN(PAGE_SIZE);
15712 @@ -109,13 +129,47 @@ SECTIONS
15713 IRQENTRY_TEXT
15714 *(.fixup)
15715 *(.gnu.warning)
15716 - /* End of text section */
15717 - _etext = .;
15718 } :text = 0x9090
15719
15720 - NOTES :text :note
15721 + . += __KERNEL_TEXT_OFFSET;
15722 +
15723 +#ifdef CONFIG_X86_32
15724 + . = ALIGN(PAGE_SIZE);
15725 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15726 +
15727 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15728 + MODULES_EXEC_VADDR = .;
15729 + BYTE(0)
15730 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15731 + . = ALIGN(HPAGE_SIZE);
15732 + MODULES_EXEC_END = . - 1;
15733 +#endif
15734 +
15735 + } :module
15736 +#endif
15737 +
15738 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15739 + /* End of text section */
15740 + _etext = . - __KERNEL_TEXT_OFFSET;
15741 + }
15742
15743 - EXCEPTION_TABLE(16) :text = 0x9090
15744 +#ifdef CONFIG_X86_32
15745 + . = ALIGN(PAGE_SIZE);
15746 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15747 + *(.idt)
15748 + . = ALIGN(PAGE_SIZE);
15749 + *(.empty_zero_page)
15750 + *(.initial_pg_fixmap)
15751 + *(.initial_pg_pmd)
15752 + *(.initial_page_table)
15753 + *(.swapper_pg_dir)
15754 + } :rodata
15755 +#endif
15756 +
15757 + . = ALIGN(PAGE_SIZE);
15758 + NOTES :rodata :note
15759 +
15760 + EXCEPTION_TABLE(16) :rodata
15761
15762 #if defined(CONFIG_DEBUG_RODATA)
15763 /* .text should occupy whole number of pages */
15764 @@ -127,16 +181,20 @@ SECTIONS
15765
15766 /* Data */
15767 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15768 +
15769 +#ifdef CONFIG_PAX_KERNEXEC
15770 + . = ALIGN(HPAGE_SIZE);
15771 +#else
15772 + . = ALIGN(PAGE_SIZE);
15773 +#endif
15774 +
15775 /* Start of data section */
15776 _sdata = .;
15777
15778 /* init_task */
15779 INIT_TASK_DATA(THREAD_SIZE)
15780
15781 -#ifdef CONFIG_X86_32
15782 - /* 32 bit has nosave before _edata */
15783 NOSAVE_DATA
15784 -#endif
15785
15786 PAGE_ALIGNED_DATA(PAGE_SIZE)
15787
15788 @@ -145,6 +203,8 @@ SECTIONS
15789 DATA_DATA
15790 CONSTRUCTORS
15791
15792 + jiffies = jiffies_64;
15793 +
15794 /* rarely changed data like cpu maps */
15795 READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
15796
15797 @@ -199,12 +259,6 @@ SECTIONS
15798 }
15799 vgetcpu_mode = VVIRT(.vgetcpu_mode);
15800
15801 - . = ALIGN(L1_CACHE_BYTES);
15802 - .jiffies : AT(VLOAD(.jiffies)) {
15803 - *(.jiffies)
15804 - }
15805 - jiffies = VVIRT(.jiffies);
15806 -
15807 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
15808 *(.vsyscall_3)
15809 }
15810 @@ -220,12 +274,19 @@ SECTIONS
15811 #endif /* CONFIG_X86_64 */
15812
15813 /* Init code and data - will be freed after init */
15814 - . = ALIGN(PAGE_SIZE);
15815 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15816 + BYTE(0)
15817 +
15818 +#ifdef CONFIG_PAX_KERNEXEC
15819 + . = ALIGN(HPAGE_SIZE);
15820 +#else
15821 + . = ALIGN(PAGE_SIZE);
15822 +#endif
15823 +
15824 __init_begin = .; /* paired with __init_end */
15825 - }
15826 + } :init.begin
15827
15828 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15829 +#ifdef CONFIG_SMP
15830 /*
15831 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15832 * output PHDR, so the next output section - .init.text - should
15833 @@ -234,12 +295,27 @@ SECTIONS
15834 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15835 #endif
15836
15837 - INIT_TEXT_SECTION(PAGE_SIZE)
15838 -#ifdef CONFIG_X86_64
15839 - :init
15840 -#endif
15841 + . = ALIGN(PAGE_SIZE);
15842 + init_begin = .;
15843 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15844 + VMLINUX_SYMBOL(_sinittext) = .;
15845 + INIT_TEXT
15846 + VMLINUX_SYMBOL(_einittext) = .;
15847 + . = ALIGN(PAGE_SIZE);
15848 + } :text.init
15849
15850 - INIT_DATA_SECTION(16)
15851 + /*
15852 + * .exit.text is discard at runtime, not link time, to deal with
15853 + * references from .altinstructions and .eh_frame
15854 + */
15855 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15856 + EXIT_TEXT
15857 + . = ALIGN(16);
15858 + } :text.exit
15859 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15860 +
15861 + . = ALIGN(PAGE_SIZE);
15862 + INIT_DATA_SECTION(16) :init
15863
15864 /*
15865 * Code and data for a variety of lowlevel trampolines, to be
15866 @@ -306,19 +382,12 @@ SECTIONS
15867 }
15868
15869 . = ALIGN(8);
15870 - /*
15871 - * .exit.text is discard at runtime, not link time, to deal with
15872 - * references from .altinstructions and .eh_frame
15873 - */
15874 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15875 - EXIT_TEXT
15876 - }
15877
15878 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15879 EXIT_DATA
15880 }
15881
15882 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15883 +#ifndef CONFIG_SMP
15884 PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
15885 #endif
15886
15887 @@ -337,16 +406,10 @@ SECTIONS
15888 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15889 __smp_locks = .;
15890 *(.smp_locks)
15891 - . = ALIGN(PAGE_SIZE);
15892 __smp_locks_end = .;
15893 + . = ALIGN(PAGE_SIZE);
15894 }
15895
15896 -#ifdef CONFIG_X86_64
15897 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15898 - NOSAVE_DATA
15899 - }
15900 -#endif
15901 -
15902 /* BSS */
15903 . = ALIGN(PAGE_SIZE);
15904 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15905 @@ -362,6 +425,7 @@ SECTIONS
15906 __brk_base = .;
15907 . += 64 * 1024; /* 64k alignment slop space */
15908 *(.brk_reservation) /* areas brk users have reserved */
15909 + . = ALIGN(HPAGE_SIZE);
15910 __brk_limit = .;
15911 }
15912
15913 @@ -388,13 +452,12 @@ SECTIONS
15914 * for the boot processor.
15915 */
15916 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15917 -INIT_PER_CPU(gdt_page);
15918 INIT_PER_CPU(irq_stack_union);
15919
15920 /*
15921 * Build-time check on the image size:
15922 */
15923 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15924 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15925 "kernel image bigger than KERNEL_IMAGE_SIZE");
15926
15927 #ifdef CONFIG_SMP
15928 diff -urNp linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c
15929 --- linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400
15930 +++ linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-08-05 19:44:35.000000000 -0400
15931 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
15932
15933 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
15934 /* copy vsyscall data */
15935 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
15936 vsyscall_gtod_data.clock.vread = clock->vread;
15937 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
15938 vsyscall_gtod_data.clock.mask = clock->mask;
15939 @@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
15940 We do this here because otherwise user space would do it on
15941 its own in a likely inferior way (no access to jiffies).
15942 If you don't like it pass NULL. */
15943 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
15944 + if (tcache && tcache->blob[0] == (j = jiffies)) {
15945 p = tcache->blob[1];
15946 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
15947 /* Load per CPU data from RDTSCP */
15948 diff -urNp linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c
15949 --- linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400
15950 +++ linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-05 19:44:35.000000000 -0400
15951 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15952 EXPORT_SYMBOL(copy_user_generic_string);
15953 EXPORT_SYMBOL(copy_user_generic_unrolled);
15954 EXPORT_SYMBOL(__copy_user_nocache);
15955 -EXPORT_SYMBOL(_copy_from_user);
15956 -EXPORT_SYMBOL(_copy_to_user);
15957
15958 EXPORT_SYMBOL(copy_page);
15959 EXPORT_SYMBOL(clear_page);
15960 diff -urNp linux-2.6.39.4/arch/x86/kernel/xsave.c linux-2.6.39.4/arch/x86/kernel/xsave.c
15961 --- linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400
15962 +++ linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-08-05 19:44:35.000000000 -0400
15963 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15964 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15965 return -EINVAL;
15966
15967 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15968 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15969 fx_sw_user->extended_size -
15970 FP_XSTATE_MAGIC2_SIZE));
15971 if (err)
15972 @@ -267,7 +267,7 @@ fx_only:
15973 * the other extended state.
15974 */
15975 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15976 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15977 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15978 }
15979
15980 /*
15981 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15982 if (use_xsave())
15983 err = restore_user_xstate(buf);
15984 else
15985 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15986 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15987 buf);
15988 if (unlikely(err)) {
15989 /*
15990 diff -urNp linux-2.6.39.4/arch/x86/kvm/emulate.c linux-2.6.39.4/arch/x86/kvm/emulate.c
15991 --- linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400
15992 +++ linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-08-05 19:44:35.000000000 -0400
15993 @@ -89,7 +89,7 @@
15994 #define Src2ImmByte (2<<29)
15995 #define Src2One (3<<29)
15996 #define Src2Imm (4<<29)
15997 -#define Src2Mask (7<<29)
15998 +#define Src2Mask (7U<<29)
15999
16000 #define X2(x...) x, x
16001 #define X3(x...) X2(x), x
16002 @@ -190,6 +190,7 @@ struct group_dual {
16003
16004 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16005 do { \
16006 + unsigned long _tmp; \
16007 __asm__ __volatile__ ( \
16008 _PRE_EFLAGS("0", "4", "2") \
16009 _op _suffix " %"_x"3,%1; " \
16010 @@ -203,8 +204,6 @@ struct group_dual {
16011 /* Raw emulation: instruction has two explicit operands. */
16012 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16013 do { \
16014 - unsigned long _tmp; \
16015 - \
16016 switch ((_dst).bytes) { \
16017 case 2: \
16018 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16019 @@ -220,7 +219,6 @@ struct group_dual {
16020
16021 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16022 do { \
16023 - unsigned long _tmp; \
16024 switch ((_dst).bytes) { \
16025 case 1: \
16026 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16027 diff -urNp linux-2.6.39.4/arch/x86/kvm/lapic.c linux-2.6.39.4/arch/x86/kvm/lapic.c
16028 --- linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400
16029 +++ linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-08-05 19:44:35.000000000 -0400
16030 @@ -53,7 +53,7 @@
16031 #define APIC_BUS_CYCLE_NS 1
16032
16033 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16034 -#define apic_debug(fmt, arg...)
16035 +#define apic_debug(fmt, arg...) do {} while (0)
16036
16037 #define APIC_LVT_NUM 6
16038 /* 14 is the version for Xeon and Pentium 8.4.8*/
16039 diff -urNp linux-2.6.39.4/arch/x86/kvm/mmu.c linux-2.6.39.4/arch/x86/kvm/mmu.c
16040 --- linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400
16041 +++ linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-08-05 19:44:35.000000000 -0400
16042 @@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16043
16044 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16045
16046 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16047 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16048
16049 /*
16050 * Assume that the pte write on a page table of the same type
16051 @@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16052 smp_rmb();
16053
16054 spin_lock(&vcpu->kvm->mmu_lock);
16055 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16056 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16057 gentry = 0;
16058 kvm_mmu_free_some_pages(vcpu);
16059 ++vcpu->kvm->stat.mmu_pte_write;
16060 diff -urNp linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h
16061 --- linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400
16062 +++ linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-08-05 19:44:35.000000000 -0400
16063 @@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_
16064 unsigned long mmu_seq;
16065 bool map_writable;
16066
16067 + pax_track_stack();
16068 +
16069 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16070
16071 r = mmu_topup_memory_caches(vcpu);
16072 @@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16073 if (need_flush)
16074 kvm_flush_remote_tlbs(vcpu->kvm);
16075
16076 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16077 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16078
16079 spin_unlock(&vcpu->kvm->mmu_lock);
16080
16081 diff -urNp linux-2.6.39.4/arch/x86/kvm/svm.c linux-2.6.39.4/arch/x86/kvm/svm.c
16082 --- linux-2.6.39.4/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400
16083 +++ linux-2.6.39.4/arch/x86/kvm/svm.c 2011-08-05 20:34:06.000000000 -0400
16084 @@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu *
16085 int cpu = raw_smp_processor_id();
16086
16087 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16088 +
16089 + pax_open_kernel();
16090 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16091 + pax_close_kernel();
16092 +
16093 load_TR_desc();
16094 }
16095
16096 @@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16097 #endif
16098 #endif
16099
16100 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16101 + __set_fs(current_thread_info()->addr_limit);
16102 +#endif
16103 +
16104 reload_tss(vcpu);
16105
16106 local_irq_disable();
16107 diff -urNp linux-2.6.39.4/arch/x86/kvm/vmx.c linux-2.6.39.4/arch/x86/kvm/vmx.c
16108 --- linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400
16109 +++ linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-08-05 20:34:06.000000000 -0400
16110 @@ -725,7 +725,11 @@ static void reload_tss(void)
16111 struct desc_struct *descs;
16112
16113 descs = (void *)gdt->address;
16114 +
16115 + pax_open_kernel();
16116 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16117 + pax_close_kernel();
16118 +
16119 load_TR_desc();
16120 }
16121
16122 @@ -1648,8 +1652,11 @@ static __init int hardware_setup(void)
16123 if (!cpu_has_vmx_flexpriority())
16124 flexpriority_enabled = 0;
16125
16126 - if (!cpu_has_vmx_tpr_shadow())
16127 - kvm_x86_ops->update_cr8_intercept = NULL;
16128 + if (!cpu_has_vmx_tpr_shadow()) {
16129 + pax_open_kernel();
16130 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16131 + pax_close_kernel();
16132 + }
16133
16134 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16135 kvm_disable_largepages();
16136 @@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16137 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16138
16139 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16140 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16141 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16142 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16143 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16144 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16145 @@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc
16146 "jmp .Lkvm_vmx_return \n\t"
16147 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16148 ".Lkvm_vmx_return: "
16149 +
16150 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16151 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16152 + ".Lkvm_vmx_return2: "
16153 +#endif
16154 +
16155 /* Save guest registers, load host registers, keep flags */
16156 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16157 "pop %0 \n\t"
16158 @@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc
16159 #endif
16160 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16161 [wordsize]"i"(sizeof(ulong))
16162 +
16163 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16164 + ,[cs]"i"(__KERNEL_CS)
16165 +#endif
16166 +
16167 : "cc", "memory"
16168 , R"ax", R"bx", R"di", R"si"
16169 #ifdef CONFIG_X86_64
16170 @@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc
16171
16172 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16173
16174 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16175 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16176 +
16177 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16178 + loadsegment(fs, __KERNEL_PERCPU);
16179 +#endif
16180 +
16181 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16182 + __set_fs(current_thread_info()->addr_limit);
16183 +#endif
16184 +
16185 vmx->launched = 1;
16186
16187 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16188 diff -urNp linux-2.6.39.4/arch/x86/kvm/x86.c linux-2.6.39.4/arch/x86/kvm/x86.c
16189 --- linux-2.6.39.4/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400
16190 +++ linux-2.6.39.4/arch/x86/kvm/x86.c 2011-08-05 20:34:06.000000000 -0400
16191 @@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16192 if (n < msr_list.nmsrs)
16193 goto out;
16194 r = -EFAULT;
16195 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16196 + goto out;
16197 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16198 num_msrs_to_save * sizeof(u32)))
16199 goto out;
16200 @@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16201 struct kvm_cpuid2 *cpuid,
16202 struct kvm_cpuid_entry2 __user *entries)
16203 {
16204 - int r;
16205 + int r, i;
16206
16207 r = -E2BIG;
16208 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16209 goto out;
16210 r = -EFAULT;
16211 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16212 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16213 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16214 goto out;
16215 + for (i = 0; i < cpuid->nent; ++i) {
16216 + struct kvm_cpuid_entry2 cpuid_entry;
16217 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16218 + goto out;
16219 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16220 + }
16221 vcpu->arch.cpuid_nent = cpuid->nent;
16222 kvm_apic_set_version(vcpu);
16223 kvm_x86_ops->cpuid_update(vcpu);
16224 @@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16225 struct kvm_cpuid2 *cpuid,
16226 struct kvm_cpuid_entry2 __user *entries)
16227 {
16228 - int r;
16229 + int r, i;
16230
16231 r = -E2BIG;
16232 if (cpuid->nent < vcpu->arch.cpuid_nent)
16233 goto out;
16234 r = -EFAULT;
16235 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16236 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16237 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16238 goto out;
16239 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16240 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16241 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16242 + goto out;
16243 + }
16244 return 0;
16245
16246 out:
16247 @@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16248 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16249 struct kvm_interrupt *irq)
16250 {
16251 - if (irq->irq < 0 || irq->irq >= 256)
16252 + if (irq->irq >= 256)
16253 return -EINVAL;
16254 if (irqchip_in_kernel(vcpu->kvm))
16255 return -ENXIO;
16256 @@ -4690,7 +4701,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16257 }
16258 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16259
16260 -int kvm_arch_init(void *opaque)
16261 +int kvm_arch_init(const void *opaque)
16262 {
16263 int r;
16264 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16265 diff -urNp linux-2.6.39.4/arch/x86/lguest/boot.c linux-2.6.39.4/arch/x86/lguest/boot.c
16266 --- linux-2.6.39.4/arch/x86/lguest/boot.c 2011-06-25 12:55:22.000000000 -0400
16267 +++ linux-2.6.39.4/arch/x86/lguest/boot.c 2011-08-05 20:34:06.000000000 -0400
16268 @@ -1178,9 +1178,10 @@ static __init int early_put_chars(u32 vt
16269 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16270 * Launcher to reboot us.
16271 */
16272 -static void lguest_restart(char *reason)
16273 +static __noreturn void lguest_restart(char *reason)
16274 {
16275 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16276 + BUG();
16277 }
16278
16279 /*G:050
16280 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_32.c linux-2.6.39.4/arch/x86/lib/atomic64_32.c
16281 --- linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400
16282 +++ linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-08-05 19:44:35.000000000 -0400
16283 @@ -8,18 +8,30 @@
16284
16285 long long atomic64_read_cx8(long long, const atomic64_t *v);
16286 EXPORT_SYMBOL(atomic64_read_cx8);
16287 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16288 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16289 long long atomic64_set_cx8(long long, const atomic64_t *v);
16290 EXPORT_SYMBOL(atomic64_set_cx8);
16291 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16292 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16293 long long atomic64_xchg_cx8(long long, unsigned high);
16294 EXPORT_SYMBOL(atomic64_xchg_cx8);
16295 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16296 EXPORT_SYMBOL(atomic64_add_return_cx8);
16297 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16298 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16299 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16300 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16301 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16302 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16303 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16304 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16305 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16306 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16307 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16308 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16309 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16310 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16311 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16312 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16313 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16314 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16315 #ifndef CONFIG_X86_CMPXCHG64
16316 long long atomic64_read_386(long long, const atomic64_t *v);
16317 EXPORT_SYMBOL(atomic64_read_386);
16318 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16319 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16320 long long atomic64_set_386(long long, const atomic64_t *v);
16321 EXPORT_SYMBOL(atomic64_set_386);
16322 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16323 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16324 long long atomic64_xchg_386(long long, unsigned high);
16325 EXPORT_SYMBOL(atomic64_xchg_386);
16326 long long atomic64_add_return_386(long long a, atomic64_t *v);
16327 EXPORT_SYMBOL(atomic64_add_return_386);
16328 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16329 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16330 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16331 EXPORT_SYMBOL(atomic64_sub_return_386);
16332 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16333 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16334 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16335 EXPORT_SYMBOL(atomic64_inc_return_386);
16336 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16337 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16338 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16339 EXPORT_SYMBOL(atomic64_dec_return_386);
16340 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16341 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16342 long long atomic64_add_386(long long a, atomic64_t *v);
16343 EXPORT_SYMBOL(atomic64_add_386);
16344 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16345 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16346 long long atomic64_sub_386(long long a, atomic64_t *v);
16347 EXPORT_SYMBOL(atomic64_sub_386);
16348 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16349 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16350 long long atomic64_inc_386(long long a, atomic64_t *v);
16351 EXPORT_SYMBOL(atomic64_inc_386);
16352 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16353 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16354 long long atomic64_dec_386(long long a, atomic64_t *v);
16355 EXPORT_SYMBOL(atomic64_dec_386);
16356 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16357 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16358 long long atomic64_dec_if_positive_386(atomic64_t *v);
16359 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16360 int atomic64_inc_not_zero_386(atomic64_t *v);
16361 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S
16362 --- linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400
16363 +++ linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-08-05 19:44:35.000000000 -0400
16364 @@ -48,6 +48,10 @@ BEGIN(read)
16365 movl (v), %eax
16366 movl 4(v), %edx
16367 RET_ENDP
16368 +BEGIN(read_unchecked)
16369 + movl (v), %eax
16370 + movl 4(v), %edx
16371 +RET_ENDP
16372 #undef v
16373
16374 #define v %esi
16375 @@ -55,6 +59,10 @@ BEGIN(set)
16376 movl %ebx, (v)
16377 movl %ecx, 4(v)
16378 RET_ENDP
16379 +BEGIN(set_unchecked)
16380 + movl %ebx, (v)
16381 + movl %ecx, 4(v)
16382 +RET_ENDP
16383 #undef v
16384
16385 #define v %esi
16386 @@ -70,6 +78,20 @@ RET_ENDP
16387 BEGIN(add)
16388 addl %eax, (v)
16389 adcl %edx, 4(v)
16390 +
16391 +#ifdef CONFIG_PAX_REFCOUNT
16392 + jno 0f
16393 + subl %eax, (v)
16394 + sbbl %edx, 4(v)
16395 + int $4
16396 +0:
16397 + _ASM_EXTABLE(0b, 0b)
16398 +#endif
16399 +
16400 +RET_ENDP
16401 +BEGIN(add_unchecked)
16402 + addl %eax, (v)
16403 + adcl %edx, 4(v)
16404 RET_ENDP
16405 #undef v
16406
16407 @@ -77,6 +99,24 @@ RET_ENDP
16408 BEGIN(add_return)
16409 addl (v), %eax
16410 adcl 4(v), %edx
16411 +
16412 +#ifdef CONFIG_PAX_REFCOUNT
16413 + into
16414 +1234:
16415 + _ASM_EXTABLE(1234b, 2f)
16416 +#endif
16417 +
16418 + movl %eax, (v)
16419 + movl %edx, 4(v)
16420 +
16421 +#ifdef CONFIG_PAX_REFCOUNT
16422 +2:
16423 +#endif
16424 +
16425 +RET_ENDP
16426 +BEGIN(add_return_unchecked)
16427 + addl (v), %eax
16428 + adcl 4(v), %edx
16429 movl %eax, (v)
16430 movl %edx, 4(v)
16431 RET_ENDP
16432 @@ -86,6 +126,20 @@ RET_ENDP
16433 BEGIN(sub)
16434 subl %eax, (v)
16435 sbbl %edx, 4(v)
16436 +
16437 +#ifdef CONFIG_PAX_REFCOUNT
16438 + jno 0f
16439 + addl %eax, (v)
16440 + adcl %edx, 4(v)
16441 + int $4
16442 +0:
16443 + _ASM_EXTABLE(0b, 0b)
16444 +#endif
16445 +
16446 +RET_ENDP
16447 +BEGIN(sub_unchecked)
16448 + subl %eax, (v)
16449 + sbbl %edx, 4(v)
16450 RET_ENDP
16451 #undef v
16452
16453 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16454 sbbl $0, %edx
16455 addl (v), %eax
16456 adcl 4(v), %edx
16457 +
16458 +#ifdef CONFIG_PAX_REFCOUNT
16459 + into
16460 +1234:
16461 + _ASM_EXTABLE(1234b, 2f)
16462 +#endif
16463 +
16464 + movl %eax, (v)
16465 + movl %edx, 4(v)
16466 +
16467 +#ifdef CONFIG_PAX_REFCOUNT
16468 +2:
16469 +#endif
16470 +
16471 +RET_ENDP
16472 +BEGIN(sub_return_unchecked)
16473 + negl %edx
16474 + negl %eax
16475 + sbbl $0, %edx
16476 + addl (v), %eax
16477 + adcl 4(v), %edx
16478 movl %eax, (v)
16479 movl %edx, 4(v)
16480 RET_ENDP
16481 @@ -105,6 +180,20 @@ RET_ENDP
16482 BEGIN(inc)
16483 addl $1, (v)
16484 adcl $0, 4(v)
16485 +
16486 +#ifdef CONFIG_PAX_REFCOUNT
16487 + jno 0f
16488 + subl $1, (v)
16489 + sbbl $0, 4(v)
16490 + int $4
16491 +0:
16492 + _ASM_EXTABLE(0b, 0b)
16493 +#endif
16494 +
16495 +RET_ENDP
16496 +BEGIN(inc_unchecked)
16497 + addl $1, (v)
16498 + adcl $0, 4(v)
16499 RET_ENDP
16500 #undef v
16501
16502 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16503 movl 4(v), %edx
16504 addl $1, %eax
16505 adcl $0, %edx
16506 +
16507 +#ifdef CONFIG_PAX_REFCOUNT
16508 + into
16509 +1234:
16510 + _ASM_EXTABLE(1234b, 2f)
16511 +#endif
16512 +
16513 + movl %eax, (v)
16514 + movl %edx, 4(v)
16515 +
16516 +#ifdef CONFIG_PAX_REFCOUNT
16517 +2:
16518 +#endif
16519 +
16520 +RET_ENDP
16521 +BEGIN(inc_return_unchecked)
16522 + movl (v), %eax
16523 + movl 4(v), %edx
16524 + addl $1, %eax
16525 + adcl $0, %edx
16526 movl %eax, (v)
16527 movl %edx, 4(v)
16528 RET_ENDP
16529 @@ -123,6 +232,20 @@ RET_ENDP
16530 BEGIN(dec)
16531 subl $1, (v)
16532 sbbl $0, 4(v)
16533 +
16534 +#ifdef CONFIG_PAX_REFCOUNT
16535 + jno 0f
16536 + addl $1, (v)
16537 + adcl $0, 4(v)
16538 + int $4
16539 +0:
16540 + _ASM_EXTABLE(0b, 0b)
16541 +#endif
16542 +
16543 +RET_ENDP
16544 +BEGIN(dec_unchecked)
16545 + subl $1, (v)
16546 + sbbl $0, 4(v)
16547 RET_ENDP
16548 #undef v
16549
16550 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16551 movl 4(v), %edx
16552 subl $1, %eax
16553 sbbl $0, %edx
16554 +
16555 +#ifdef CONFIG_PAX_REFCOUNT
16556 + into
16557 +1234:
16558 + _ASM_EXTABLE(1234b, 2f)
16559 +#endif
16560 +
16561 + movl %eax, (v)
16562 + movl %edx, 4(v)
16563 +
16564 +#ifdef CONFIG_PAX_REFCOUNT
16565 +2:
16566 +#endif
16567 +
16568 +RET_ENDP
16569 +BEGIN(dec_return_unchecked)
16570 + movl (v), %eax
16571 + movl 4(v), %edx
16572 + subl $1, %eax
16573 + sbbl $0, %edx
16574 movl %eax, (v)
16575 movl %edx, 4(v)
16576 RET_ENDP
16577 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16578 adcl %edx, %edi
16579 addl (v), %eax
16580 adcl 4(v), %edx
16581 +
16582 +#ifdef CONFIG_PAX_REFCOUNT
16583 + into
16584 +1234:
16585 + _ASM_EXTABLE(1234b, 2f)
16586 +#endif
16587 +
16588 cmpl %eax, %esi
16589 je 3f
16590 1:
16591 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16592 1:
16593 addl $1, %eax
16594 adcl $0, %edx
16595 +
16596 +#ifdef CONFIG_PAX_REFCOUNT
16597 + into
16598 +1234:
16599 + _ASM_EXTABLE(1234b, 2f)
16600 +#endif
16601 +
16602 movl %eax, (v)
16603 movl %edx, 4(v)
16604 movl $1, %eax
16605 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16606 movl 4(v), %edx
16607 subl $1, %eax
16608 sbbl $0, %edx
16609 +
16610 +#ifdef CONFIG_PAX_REFCOUNT
16611 + into
16612 +1234:
16613 + _ASM_EXTABLE(1234b, 1f)
16614 +#endif
16615 +
16616 js 1f
16617 movl %eax, (v)
16618 movl %edx, 4(v)
16619 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S
16620 --- linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400
16621 +++ linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-05 19:44:35.000000000 -0400
16622 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16623 CFI_ENDPROC
16624 ENDPROC(atomic64_read_cx8)
16625
16626 +ENTRY(atomic64_read_unchecked_cx8)
16627 + CFI_STARTPROC
16628 +
16629 + read64 %ecx
16630 + ret
16631 + CFI_ENDPROC
16632 +ENDPROC(atomic64_read_unchecked_cx8)
16633 +
16634 ENTRY(atomic64_set_cx8)
16635 CFI_STARTPROC
16636
16637 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16638 CFI_ENDPROC
16639 ENDPROC(atomic64_set_cx8)
16640
16641 +ENTRY(atomic64_set_unchecked_cx8)
16642 + CFI_STARTPROC
16643 +
16644 +1:
16645 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16646 + * are atomic on 586 and newer */
16647 + cmpxchg8b (%esi)
16648 + jne 1b
16649 +
16650 + ret
16651 + CFI_ENDPROC
16652 +ENDPROC(atomic64_set_unchecked_cx8)
16653 +
16654 ENTRY(atomic64_xchg_cx8)
16655 CFI_STARTPROC
16656
16657 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16658 CFI_ENDPROC
16659 ENDPROC(atomic64_xchg_cx8)
16660
16661 -.macro addsub_return func ins insc
16662 -ENTRY(atomic64_\func\()_return_cx8)
16663 +.macro addsub_return func ins insc unchecked=""
16664 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16665 CFI_STARTPROC
16666 SAVE ebp
16667 SAVE ebx
16668 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16669 movl %edx, %ecx
16670 \ins\()l %esi, %ebx
16671 \insc\()l %edi, %ecx
16672 +
16673 +.ifb \unchecked
16674 +#ifdef CONFIG_PAX_REFCOUNT
16675 + into
16676 +2:
16677 + _ASM_EXTABLE(2b, 3f)
16678 +#endif
16679 +.endif
16680 +
16681 LOCK_PREFIX
16682 cmpxchg8b (%ebp)
16683 jne 1b
16684 -
16685 -10:
16686 movl %ebx, %eax
16687 movl %ecx, %edx
16688 +
16689 +.ifb \unchecked
16690 +#ifdef CONFIG_PAX_REFCOUNT
16691 +3:
16692 +#endif
16693 +.endif
16694 +
16695 RESTORE edi
16696 RESTORE esi
16697 RESTORE ebx
16698 RESTORE ebp
16699 ret
16700 CFI_ENDPROC
16701 -ENDPROC(atomic64_\func\()_return_cx8)
16702 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16703 .endm
16704
16705 addsub_return add add adc
16706 addsub_return sub sub sbb
16707 +addsub_return add add adc _unchecked
16708 +addsub_return sub sub sbb _unchecked
16709
16710 -.macro incdec_return func ins insc
16711 -ENTRY(atomic64_\func\()_return_cx8)
16712 +.macro incdec_return func ins insc unchecked
16713 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16714 CFI_STARTPROC
16715 SAVE ebx
16716
16717 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16718 movl %edx, %ecx
16719 \ins\()l $1, %ebx
16720 \insc\()l $0, %ecx
16721 +
16722 +.ifb \unchecked
16723 +#ifdef CONFIG_PAX_REFCOUNT
16724 + into
16725 +2:
16726 + _ASM_EXTABLE(2b, 3f)
16727 +#endif
16728 +.endif
16729 +
16730 LOCK_PREFIX
16731 cmpxchg8b (%esi)
16732 jne 1b
16733
16734 -10:
16735 movl %ebx, %eax
16736 movl %ecx, %edx
16737 +
16738 +.ifb \unchecked
16739 +#ifdef CONFIG_PAX_REFCOUNT
16740 +3:
16741 +#endif
16742 +.endif
16743 +
16744 RESTORE ebx
16745 ret
16746 CFI_ENDPROC
16747 -ENDPROC(atomic64_\func\()_return_cx8)
16748 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16749 .endm
16750
16751 incdec_return inc add adc
16752 incdec_return dec sub sbb
16753 +incdec_return inc add adc _unchecked
16754 +incdec_return dec sub sbb _unchecked
16755
16756 ENTRY(atomic64_dec_if_positive_cx8)
16757 CFI_STARTPROC
16758 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16759 movl %edx, %ecx
16760 subl $1, %ebx
16761 sbb $0, %ecx
16762 +
16763 +#ifdef CONFIG_PAX_REFCOUNT
16764 + into
16765 +1234:
16766 + _ASM_EXTABLE(1234b, 2f)
16767 +#endif
16768 +
16769 js 2f
16770 LOCK_PREFIX
16771 cmpxchg8b (%esi)
16772 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16773 movl %edx, %ecx
16774 addl %esi, %ebx
16775 adcl %edi, %ecx
16776 +
16777 +#ifdef CONFIG_PAX_REFCOUNT
16778 + into
16779 +1234:
16780 + _ASM_EXTABLE(1234b, 3f)
16781 +#endif
16782 +
16783 LOCK_PREFIX
16784 cmpxchg8b (%ebp)
16785 jne 1b
16786 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16787 movl %edx, %ecx
16788 addl $1, %ebx
16789 adcl $0, %ecx
16790 +
16791 +#ifdef CONFIG_PAX_REFCOUNT
16792 + into
16793 +1234:
16794 + _ASM_EXTABLE(1234b, 3f)
16795 +#endif
16796 +
16797 LOCK_PREFIX
16798 cmpxchg8b (%esi)
16799 jne 1b
16800 diff -urNp linux-2.6.39.4/arch/x86/lib/checksum_32.S linux-2.6.39.4/arch/x86/lib/checksum_32.S
16801 --- linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400
16802 +++ linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-08-05 19:44:35.000000000 -0400
16803 @@ -28,7 +28,8 @@
16804 #include <linux/linkage.h>
16805 #include <asm/dwarf2.h>
16806 #include <asm/errno.h>
16807 -
16808 +#include <asm/segment.h>
16809 +
16810 /*
16811 * computes a partial checksum, e.g. for TCP/UDP fragments
16812 */
16813 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16814
16815 #define ARGBASE 16
16816 #define FP 12
16817 -
16818 -ENTRY(csum_partial_copy_generic)
16819 +
16820 +ENTRY(csum_partial_copy_generic_to_user)
16821 CFI_STARTPROC
16822 +
16823 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16824 + pushl_cfi %gs
16825 + popl_cfi %es
16826 + jmp csum_partial_copy_generic
16827 +#endif
16828 +
16829 +ENTRY(csum_partial_copy_generic_from_user)
16830 +
16831 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16832 + pushl_cfi %gs
16833 + popl_cfi %ds
16834 +#endif
16835 +
16836 +ENTRY(csum_partial_copy_generic)
16837 subl $4,%esp
16838 CFI_ADJUST_CFA_OFFSET 4
16839 pushl_cfi %edi
16840 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16841 jmp 4f
16842 SRC(1: movw (%esi), %bx )
16843 addl $2, %esi
16844 -DST( movw %bx, (%edi) )
16845 +DST( movw %bx, %es:(%edi) )
16846 addl $2, %edi
16847 addw %bx, %ax
16848 adcl $0, %eax
16849 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16850 SRC(1: movl (%esi), %ebx )
16851 SRC( movl 4(%esi), %edx )
16852 adcl %ebx, %eax
16853 -DST( movl %ebx, (%edi) )
16854 +DST( movl %ebx, %es:(%edi) )
16855 adcl %edx, %eax
16856 -DST( movl %edx, 4(%edi) )
16857 +DST( movl %edx, %es:4(%edi) )
16858
16859 SRC( movl 8(%esi), %ebx )
16860 SRC( movl 12(%esi), %edx )
16861 adcl %ebx, %eax
16862 -DST( movl %ebx, 8(%edi) )
16863 +DST( movl %ebx, %es:8(%edi) )
16864 adcl %edx, %eax
16865 -DST( movl %edx, 12(%edi) )
16866 +DST( movl %edx, %es:12(%edi) )
16867
16868 SRC( movl 16(%esi), %ebx )
16869 SRC( movl 20(%esi), %edx )
16870 adcl %ebx, %eax
16871 -DST( movl %ebx, 16(%edi) )
16872 +DST( movl %ebx, %es:16(%edi) )
16873 adcl %edx, %eax
16874 -DST( movl %edx, 20(%edi) )
16875 +DST( movl %edx, %es:20(%edi) )
16876
16877 SRC( movl 24(%esi), %ebx )
16878 SRC( movl 28(%esi), %edx )
16879 adcl %ebx, %eax
16880 -DST( movl %ebx, 24(%edi) )
16881 +DST( movl %ebx, %es:24(%edi) )
16882 adcl %edx, %eax
16883 -DST( movl %edx, 28(%edi) )
16884 +DST( movl %edx, %es:28(%edi) )
16885
16886 lea 32(%esi), %esi
16887 lea 32(%edi), %edi
16888 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16889 shrl $2, %edx # This clears CF
16890 SRC(3: movl (%esi), %ebx )
16891 adcl %ebx, %eax
16892 -DST( movl %ebx, (%edi) )
16893 +DST( movl %ebx, %es:(%edi) )
16894 lea 4(%esi), %esi
16895 lea 4(%edi), %edi
16896 dec %edx
16897 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16898 jb 5f
16899 SRC( movw (%esi), %cx )
16900 leal 2(%esi), %esi
16901 -DST( movw %cx, (%edi) )
16902 +DST( movw %cx, %es:(%edi) )
16903 leal 2(%edi), %edi
16904 je 6f
16905 shll $16,%ecx
16906 SRC(5: movb (%esi), %cl )
16907 -DST( movb %cl, (%edi) )
16908 +DST( movb %cl, %es:(%edi) )
16909 6: addl %ecx, %eax
16910 adcl $0, %eax
16911 7:
16912 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16913
16914 6001:
16915 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16916 - movl $-EFAULT, (%ebx)
16917 + movl $-EFAULT, %ss:(%ebx)
16918
16919 # zero the complete destination - computing the rest
16920 # is too much work
16921 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16922
16923 6002:
16924 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16925 - movl $-EFAULT,(%ebx)
16926 + movl $-EFAULT,%ss:(%ebx)
16927 jmp 5000b
16928
16929 .previous
16930
16931 + pushl_cfi %ss
16932 + popl_cfi %ds
16933 + pushl_cfi %ss
16934 + popl_cfi %es
16935 popl_cfi %ebx
16936 CFI_RESTORE ebx
16937 popl_cfi %esi
16938 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16939 popl_cfi %ecx # equivalent to addl $4,%esp
16940 ret
16941 CFI_ENDPROC
16942 -ENDPROC(csum_partial_copy_generic)
16943 +ENDPROC(csum_partial_copy_generic_to_user)
16944
16945 #else
16946
16947 /* Version for PentiumII/PPro */
16948
16949 #define ROUND1(x) \
16950 + nop; nop; nop; \
16951 SRC(movl x(%esi), %ebx ) ; \
16952 addl %ebx, %eax ; \
16953 - DST(movl %ebx, x(%edi) ) ;
16954 + DST(movl %ebx, %es:x(%edi)) ;
16955
16956 #define ROUND(x) \
16957 + nop; nop; nop; \
16958 SRC(movl x(%esi), %ebx ) ; \
16959 adcl %ebx, %eax ; \
16960 - DST(movl %ebx, x(%edi) ) ;
16961 + DST(movl %ebx, %es:x(%edi)) ;
16962
16963 #define ARGBASE 12
16964 -
16965 -ENTRY(csum_partial_copy_generic)
16966 +
16967 +ENTRY(csum_partial_copy_generic_to_user)
16968 CFI_STARTPROC
16969 +
16970 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16971 + pushl_cfi %gs
16972 + popl_cfi %es
16973 + jmp csum_partial_copy_generic
16974 +#endif
16975 +
16976 +ENTRY(csum_partial_copy_generic_from_user)
16977 +
16978 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16979 + pushl_cfi %gs
16980 + popl_cfi %ds
16981 +#endif
16982 +
16983 +ENTRY(csum_partial_copy_generic)
16984 pushl_cfi %ebx
16985 CFI_REL_OFFSET ebx, 0
16986 pushl_cfi %edi
16987 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16988 subl %ebx, %edi
16989 lea -1(%esi),%edx
16990 andl $-32,%edx
16991 - lea 3f(%ebx,%ebx), %ebx
16992 + lea 3f(%ebx,%ebx,2), %ebx
16993 testl %esi, %esi
16994 jmp *%ebx
16995 1: addl $64,%esi
16996 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16997 jb 5f
16998 SRC( movw (%esi), %dx )
16999 leal 2(%esi), %esi
17000 -DST( movw %dx, (%edi) )
17001 +DST( movw %dx, %es:(%edi) )
17002 leal 2(%edi), %edi
17003 je 6f
17004 shll $16,%edx
17005 5:
17006 SRC( movb (%esi), %dl )
17007 -DST( movb %dl, (%edi) )
17008 +DST( movb %dl, %es:(%edi) )
17009 6: addl %edx, %eax
17010 adcl $0, %eax
17011 7:
17012 .section .fixup, "ax"
17013 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17014 - movl $-EFAULT, (%ebx)
17015 + movl $-EFAULT, %ss:(%ebx)
17016 # zero the complete destination (computing the rest is too much work)
17017 movl ARGBASE+8(%esp),%edi # dst
17018 movl ARGBASE+12(%esp),%ecx # len
17019 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17020 rep; stosb
17021 jmp 7b
17022 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17023 - movl $-EFAULT, (%ebx)
17024 + movl $-EFAULT, %ss:(%ebx)
17025 jmp 7b
17026 .previous
17027
17028 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17029 + pushl_cfi %ss
17030 + popl_cfi %ds
17031 + pushl_cfi %ss
17032 + popl_cfi %es
17033 +#endif
17034 +
17035 popl_cfi %esi
17036 CFI_RESTORE esi
17037 popl_cfi %edi
17038 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17039 CFI_RESTORE ebx
17040 ret
17041 CFI_ENDPROC
17042 -ENDPROC(csum_partial_copy_generic)
17043 +ENDPROC(csum_partial_copy_generic_to_user)
17044
17045 #undef ROUND
17046 #undef ROUND1
17047 diff -urNp linux-2.6.39.4/arch/x86/lib/clear_page_64.S linux-2.6.39.4/arch/x86/lib/clear_page_64.S
17048 --- linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400
17049 +++ linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-08-05 19:44:35.000000000 -0400
17050 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
17051
17052 #include <asm/cpufeature.h>
17053
17054 - .section .altinstr_replacement,"ax"
17055 + .section .altinstr_replacement,"a"
17056 1: .byte 0xeb /* jmp <disp8> */
17057 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17058 2:
17059 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_page_64.S linux-2.6.39.4/arch/x86/lib/copy_page_64.S
17060 --- linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400
17061 +++ linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-08-05 19:44:35.000000000 -0400
17062 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
17063
17064 #include <asm/cpufeature.h>
17065
17066 - .section .altinstr_replacement,"ax"
17067 + .section .altinstr_replacement,"a"
17068 1: .byte 0xeb /* jmp <disp8> */
17069 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17070 2:
17071 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_64.S linux-2.6.39.4/arch/x86/lib/copy_user_64.S
17072 --- linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400
17073 +++ linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-08-05 19:44:35.000000000 -0400
17074 @@ -15,13 +15,14 @@
17075 #include <asm/asm-offsets.h>
17076 #include <asm/thread_info.h>
17077 #include <asm/cpufeature.h>
17078 +#include <asm/pgtable.h>
17079
17080 .macro ALTERNATIVE_JUMP feature,orig,alt
17081 0:
17082 .byte 0xe9 /* 32bit jump */
17083 .long \orig-1f /* by default jump to orig */
17084 1:
17085 - .section .altinstr_replacement,"ax"
17086 + .section .altinstr_replacement,"a"
17087 2: .byte 0xe9 /* near jump with 32bit immediate */
17088 .long \alt-1b /* offset */ /* or alternatively to alt */
17089 .previous
17090 @@ -64,37 +65,13 @@
17091 #endif
17092 .endm
17093
17094 -/* Standard copy_to_user with segment limit checking */
17095 -ENTRY(_copy_to_user)
17096 - CFI_STARTPROC
17097 - GET_THREAD_INFO(%rax)
17098 - movq %rdi,%rcx
17099 - addq %rdx,%rcx
17100 - jc bad_to_user
17101 - cmpq TI_addr_limit(%rax),%rcx
17102 - ja bad_to_user
17103 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17104 - CFI_ENDPROC
17105 -ENDPROC(_copy_to_user)
17106 -
17107 -/* Standard copy_from_user with segment limit checking */
17108 -ENTRY(_copy_from_user)
17109 - CFI_STARTPROC
17110 - GET_THREAD_INFO(%rax)
17111 - movq %rsi,%rcx
17112 - addq %rdx,%rcx
17113 - jc bad_from_user
17114 - cmpq TI_addr_limit(%rax),%rcx
17115 - ja bad_from_user
17116 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17117 - CFI_ENDPROC
17118 -ENDPROC(_copy_from_user)
17119 -
17120 .section .fixup,"ax"
17121 /* must zero dest */
17122 ENTRY(bad_from_user)
17123 bad_from_user:
17124 CFI_STARTPROC
17125 + testl %edx,%edx
17126 + js bad_to_user
17127 movl %edx,%ecx
17128 xorl %eax,%eax
17129 rep
17130 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S
17131 --- linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400
17132 +++ linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-05 19:44:35.000000000 -0400
17133 @@ -14,6 +14,7 @@
17134 #include <asm/current.h>
17135 #include <asm/asm-offsets.h>
17136 #include <asm/thread_info.h>
17137 +#include <asm/pgtable.h>
17138
17139 .macro ALIGN_DESTINATION
17140 #ifdef FIX_ALIGNMENT
17141 @@ -50,6 +51,15 @@
17142 */
17143 ENTRY(__copy_user_nocache)
17144 CFI_STARTPROC
17145 +
17146 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17147 + mov $PAX_USER_SHADOW_BASE,%rcx
17148 + cmp %rcx,%rsi
17149 + jae 1f
17150 + add %rcx,%rsi
17151 +1:
17152 +#endif
17153 +
17154 cmpl $8,%edx
17155 jb 20f /* less then 8 bytes, go to byte copy loop */
17156 ALIGN_DESTINATION
17157 diff -urNp linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c
17158 --- linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400
17159 +++ linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-08-05 19:44:35.000000000 -0400
17160 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17161 len -= 2;
17162 }
17163 }
17164 +
17165 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17166 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17167 + src += PAX_USER_SHADOW_BASE;
17168 +#endif
17169 +
17170 isum = csum_partial_copy_generic((__force const void *)src,
17171 dst, len, isum, errp, NULL);
17172 if (unlikely(*errp))
17173 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17174 }
17175
17176 *errp = 0;
17177 +
17178 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17179 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17180 + dst += PAX_USER_SHADOW_BASE;
17181 +#endif
17182 +
17183 return csum_partial_copy_generic(src, (void __force *)dst,
17184 len, isum, NULL, errp);
17185 }
17186 diff -urNp linux-2.6.39.4/arch/x86/lib/getuser.S linux-2.6.39.4/arch/x86/lib/getuser.S
17187 --- linux-2.6.39.4/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400
17188 +++ linux-2.6.39.4/arch/x86/lib/getuser.S 2011-08-05 19:44:35.000000000 -0400
17189 @@ -33,14 +33,35 @@
17190 #include <asm/asm-offsets.h>
17191 #include <asm/thread_info.h>
17192 #include <asm/asm.h>
17193 +#include <asm/segment.h>
17194 +#include <asm/pgtable.h>
17195 +
17196 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17197 +#define __copyuser_seg gs;
17198 +#else
17199 +#define __copyuser_seg
17200 +#endif
17201
17202 .text
17203 ENTRY(__get_user_1)
17204 CFI_STARTPROC
17205 +
17206 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17207 GET_THREAD_INFO(%_ASM_DX)
17208 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17209 jae bad_get_user
17210 -1: movzb (%_ASM_AX),%edx
17211 +
17212 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17213 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17214 + cmp %_ASM_DX,%_ASM_AX
17215 + jae 1234f
17216 + add %_ASM_DX,%_ASM_AX
17217 +1234:
17218 +#endif
17219 +
17220 +#endif
17221 +
17222 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17223 xor %eax,%eax
17224 ret
17225 CFI_ENDPROC
17226 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17227 ENTRY(__get_user_2)
17228 CFI_STARTPROC
17229 add $1,%_ASM_AX
17230 +
17231 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17232 jc bad_get_user
17233 GET_THREAD_INFO(%_ASM_DX)
17234 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17235 jae bad_get_user
17236 -2: movzwl -1(%_ASM_AX),%edx
17237 +
17238 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17239 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17240 + cmp %_ASM_DX,%_ASM_AX
17241 + jae 1234f
17242 + add %_ASM_DX,%_ASM_AX
17243 +1234:
17244 +#endif
17245 +
17246 +#endif
17247 +
17248 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17249 xor %eax,%eax
17250 ret
17251 CFI_ENDPROC
17252 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17253 ENTRY(__get_user_4)
17254 CFI_STARTPROC
17255 add $3,%_ASM_AX
17256 +
17257 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17258 jc bad_get_user
17259 GET_THREAD_INFO(%_ASM_DX)
17260 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17261 jae bad_get_user
17262 -3: mov -3(%_ASM_AX),%edx
17263 +
17264 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17265 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17266 + cmp %_ASM_DX,%_ASM_AX
17267 + jae 1234f
17268 + add %_ASM_DX,%_ASM_AX
17269 +1234:
17270 +#endif
17271 +
17272 +#endif
17273 +
17274 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17275 xor %eax,%eax
17276 ret
17277 CFI_ENDPROC
17278 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17279 GET_THREAD_INFO(%_ASM_DX)
17280 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17281 jae bad_get_user
17282 +
17283 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17284 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17285 + cmp %_ASM_DX,%_ASM_AX
17286 + jae 1234f
17287 + add %_ASM_DX,%_ASM_AX
17288 +1234:
17289 +#endif
17290 +
17291 4: movq -7(%_ASM_AX),%_ASM_DX
17292 xor %eax,%eax
17293 ret
17294 diff -urNp linux-2.6.39.4/arch/x86/lib/insn.c linux-2.6.39.4/arch/x86/lib/insn.c
17295 --- linux-2.6.39.4/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400
17296 +++ linux-2.6.39.4/arch/x86/lib/insn.c 2011-08-05 19:44:35.000000000 -0400
17297 @@ -21,6 +21,11 @@
17298 #include <linux/string.h>
17299 #include <asm/inat.h>
17300 #include <asm/insn.h>
17301 +#ifdef __KERNEL__
17302 +#include <asm/pgtable_types.h>
17303 +#else
17304 +#define ktla_ktva(addr) addr
17305 +#endif
17306
17307 #define get_next(t, insn) \
17308 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17309 @@ -40,8 +45,8 @@
17310 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17311 {
17312 memset(insn, 0, sizeof(*insn));
17313 - insn->kaddr = kaddr;
17314 - insn->next_byte = kaddr;
17315 + insn->kaddr = ktla_ktva(kaddr);
17316 + insn->next_byte = ktla_ktva(kaddr);
17317 insn->x86_64 = x86_64 ? 1 : 0;
17318 insn->opnd_bytes = 4;
17319 if (x86_64)
17320 diff -urNp linux-2.6.39.4/arch/x86/lib/mmx_32.c linux-2.6.39.4/arch/x86/lib/mmx_32.c
17321 --- linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400
17322 +++ linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-08-05 19:44:35.000000000 -0400
17323 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17324 {
17325 void *p;
17326 int i;
17327 + unsigned long cr0;
17328
17329 if (unlikely(in_interrupt()))
17330 return __memcpy(to, from, len);
17331 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17332 kernel_fpu_begin();
17333
17334 __asm__ __volatile__ (
17335 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17336 - " prefetch 64(%0)\n"
17337 - " prefetch 128(%0)\n"
17338 - " prefetch 192(%0)\n"
17339 - " prefetch 256(%0)\n"
17340 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17341 + " prefetch 64(%1)\n"
17342 + " prefetch 128(%1)\n"
17343 + " prefetch 192(%1)\n"
17344 + " prefetch 256(%1)\n"
17345 "2: \n"
17346 ".section .fixup, \"ax\"\n"
17347 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17348 + "3: \n"
17349 +
17350 +#ifdef CONFIG_PAX_KERNEXEC
17351 + " movl %%cr0, %0\n"
17352 + " movl %0, %%eax\n"
17353 + " andl $0xFFFEFFFF, %%eax\n"
17354 + " movl %%eax, %%cr0\n"
17355 +#endif
17356 +
17357 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17358 +
17359 +#ifdef CONFIG_PAX_KERNEXEC
17360 + " movl %0, %%cr0\n"
17361 +#endif
17362 +
17363 " jmp 2b\n"
17364 ".previous\n"
17365 _ASM_EXTABLE(1b, 3b)
17366 - : : "r" (from));
17367 + : "=&r" (cr0) : "r" (from) : "ax");
17368
17369 for ( ; i > 5; i--) {
17370 __asm__ __volatile__ (
17371 - "1: prefetch 320(%0)\n"
17372 - "2: movq (%0), %%mm0\n"
17373 - " movq 8(%0), %%mm1\n"
17374 - " movq 16(%0), %%mm2\n"
17375 - " movq 24(%0), %%mm3\n"
17376 - " movq %%mm0, (%1)\n"
17377 - " movq %%mm1, 8(%1)\n"
17378 - " movq %%mm2, 16(%1)\n"
17379 - " movq %%mm3, 24(%1)\n"
17380 - " movq 32(%0), %%mm0\n"
17381 - " movq 40(%0), %%mm1\n"
17382 - " movq 48(%0), %%mm2\n"
17383 - " movq 56(%0), %%mm3\n"
17384 - " movq %%mm0, 32(%1)\n"
17385 - " movq %%mm1, 40(%1)\n"
17386 - " movq %%mm2, 48(%1)\n"
17387 - " movq %%mm3, 56(%1)\n"
17388 + "1: prefetch 320(%1)\n"
17389 + "2: movq (%1), %%mm0\n"
17390 + " movq 8(%1), %%mm1\n"
17391 + " movq 16(%1), %%mm2\n"
17392 + " movq 24(%1), %%mm3\n"
17393 + " movq %%mm0, (%2)\n"
17394 + " movq %%mm1, 8(%2)\n"
17395 + " movq %%mm2, 16(%2)\n"
17396 + " movq %%mm3, 24(%2)\n"
17397 + " movq 32(%1), %%mm0\n"
17398 + " movq 40(%1), %%mm1\n"
17399 + " movq 48(%1), %%mm2\n"
17400 + " movq 56(%1), %%mm3\n"
17401 + " movq %%mm0, 32(%2)\n"
17402 + " movq %%mm1, 40(%2)\n"
17403 + " movq %%mm2, 48(%2)\n"
17404 + " movq %%mm3, 56(%2)\n"
17405 ".section .fixup, \"ax\"\n"
17406 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17407 + "3:\n"
17408 +
17409 +#ifdef CONFIG_PAX_KERNEXEC
17410 + " movl %%cr0, %0\n"
17411 + " movl %0, %%eax\n"
17412 + " andl $0xFFFEFFFF, %%eax\n"
17413 + " movl %%eax, %%cr0\n"
17414 +#endif
17415 +
17416 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17417 +
17418 +#ifdef CONFIG_PAX_KERNEXEC
17419 + " movl %0, %%cr0\n"
17420 +#endif
17421 +
17422 " jmp 2b\n"
17423 ".previous\n"
17424 _ASM_EXTABLE(1b, 3b)
17425 - : : "r" (from), "r" (to) : "memory");
17426 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17427
17428 from += 64;
17429 to += 64;
17430 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17431 static void fast_copy_page(void *to, void *from)
17432 {
17433 int i;
17434 + unsigned long cr0;
17435
17436 kernel_fpu_begin();
17437
17438 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17439 * but that is for later. -AV
17440 */
17441 __asm__ __volatile__(
17442 - "1: prefetch (%0)\n"
17443 - " prefetch 64(%0)\n"
17444 - " prefetch 128(%0)\n"
17445 - " prefetch 192(%0)\n"
17446 - " prefetch 256(%0)\n"
17447 + "1: prefetch (%1)\n"
17448 + " prefetch 64(%1)\n"
17449 + " prefetch 128(%1)\n"
17450 + " prefetch 192(%1)\n"
17451 + " prefetch 256(%1)\n"
17452 "2: \n"
17453 ".section .fixup, \"ax\"\n"
17454 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17455 + "3: \n"
17456 +
17457 +#ifdef CONFIG_PAX_KERNEXEC
17458 + " movl %%cr0, %0\n"
17459 + " movl %0, %%eax\n"
17460 + " andl $0xFFFEFFFF, %%eax\n"
17461 + " movl %%eax, %%cr0\n"
17462 +#endif
17463 +
17464 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17465 +
17466 +#ifdef CONFIG_PAX_KERNEXEC
17467 + " movl %0, %%cr0\n"
17468 +#endif
17469 +
17470 " jmp 2b\n"
17471 ".previous\n"
17472 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17473 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17474
17475 for (i = 0; i < (4096-320)/64; i++) {
17476 __asm__ __volatile__ (
17477 - "1: prefetch 320(%0)\n"
17478 - "2: movq (%0), %%mm0\n"
17479 - " movntq %%mm0, (%1)\n"
17480 - " movq 8(%0), %%mm1\n"
17481 - " movntq %%mm1, 8(%1)\n"
17482 - " movq 16(%0), %%mm2\n"
17483 - " movntq %%mm2, 16(%1)\n"
17484 - " movq 24(%0), %%mm3\n"
17485 - " movntq %%mm3, 24(%1)\n"
17486 - " movq 32(%0), %%mm4\n"
17487 - " movntq %%mm4, 32(%1)\n"
17488 - " movq 40(%0), %%mm5\n"
17489 - " movntq %%mm5, 40(%1)\n"
17490 - " movq 48(%0), %%mm6\n"
17491 - " movntq %%mm6, 48(%1)\n"
17492 - " movq 56(%0), %%mm7\n"
17493 - " movntq %%mm7, 56(%1)\n"
17494 + "1: prefetch 320(%1)\n"
17495 + "2: movq (%1), %%mm0\n"
17496 + " movntq %%mm0, (%2)\n"
17497 + " movq 8(%1), %%mm1\n"
17498 + " movntq %%mm1, 8(%2)\n"
17499 + " movq 16(%1), %%mm2\n"
17500 + " movntq %%mm2, 16(%2)\n"
17501 + " movq 24(%1), %%mm3\n"
17502 + " movntq %%mm3, 24(%2)\n"
17503 + " movq 32(%1), %%mm4\n"
17504 + " movntq %%mm4, 32(%2)\n"
17505 + " movq 40(%1), %%mm5\n"
17506 + " movntq %%mm5, 40(%2)\n"
17507 + " movq 48(%1), %%mm6\n"
17508 + " movntq %%mm6, 48(%2)\n"
17509 + " movq 56(%1), %%mm7\n"
17510 + " movntq %%mm7, 56(%2)\n"
17511 ".section .fixup, \"ax\"\n"
17512 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17513 + "3:\n"
17514 +
17515 +#ifdef CONFIG_PAX_KERNEXEC
17516 + " movl %%cr0, %0\n"
17517 + " movl %0, %%eax\n"
17518 + " andl $0xFFFEFFFF, %%eax\n"
17519 + " movl %%eax, %%cr0\n"
17520 +#endif
17521 +
17522 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17523 +
17524 +#ifdef CONFIG_PAX_KERNEXEC
17525 + " movl %0, %%cr0\n"
17526 +#endif
17527 +
17528 " jmp 2b\n"
17529 ".previous\n"
17530 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17531 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17532
17533 from += 64;
17534 to += 64;
17535 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17536 static void fast_copy_page(void *to, void *from)
17537 {
17538 int i;
17539 + unsigned long cr0;
17540
17541 kernel_fpu_begin();
17542
17543 __asm__ __volatile__ (
17544 - "1: prefetch (%0)\n"
17545 - " prefetch 64(%0)\n"
17546 - " prefetch 128(%0)\n"
17547 - " prefetch 192(%0)\n"
17548 - " prefetch 256(%0)\n"
17549 + "1: prefetch (%1)\n"
17550 + " prefetch 64(%1)\n"
17551 + " prefetch 128(%1)\n"
17552 + " prefetch 192(%1)\n"
17553 + " prefetch 256(%1)\n"
17554 "2: \n"
17555 ".section .fixup, \"ax\"\n"
17556 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17557 + "3: \n"
17558 +
17559 +#ifdef CONFIG_PAX_KERNEXEC
17560 + " movl %%cr0, %0\n"
17561 + " movl %0, %%eax\n"
17562 + " andl $0xFFFEFFFF, %%eax\n"
17563 + " movl %%eax, %%cr0\n"
17564 +#endif
17565 +
17566 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17567 +
17568 +#ifdef CONFIG_PAX_KERNEXEC
17569 + " movl %0, %%cr0\n"
17570 +#endif
17571 +
17572 " jmp 2b\n"
17573 ".previous\n"
17574 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17575 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17576
17577 for (i = 0; i < 4096/64; i++) {
17578 __asm__ __volatile__ (
17579 - "1: prefetch 320(%0)\n"
17580 - "2: movq (%0), %%mm0\n"
17581 - " movq 8(%0), %%mm1\n"
17582 - " movq 16(%0), %%mm2\n"
17583 - " movq 24(%0), %%mm3\n"
17584 - " movq %%mm0, (%1)\n"
17585 - " movq %%mm1, 8(%1)\n"
17586 - " movq %%mm2, 16(%1)\n"
17587 - " movq %%mm3, 24(%1)\n"
17588 - " movq 32(%0), %%mm0\n"
17589 - " movq 40(%0), %%mm1\n"
17590 - " movq 48(%0), %%mm2\n"
17591 - " movq 56(%0), %%mm3\n"
17592 - " movq %%mm0, 32(%1)\n"
17593 - " movq %%mm1, 40(%1)\n"
17594 - " movq %%mm2, 48(%1)\n"
17595 - " movq %%mm3, 56(%1)\n"
17596 + "1: prefetch 320(%1)\n"
17597 + "2: movq (%1), %%mm0\n"
17598 + " movq 8(%1), %%mm1\n"
17599 + " movq 16(%1), %%mm2\n"
17600 + " movq 24(%1), %%mm3\n"
17601 + " movq %%mm0, (%2)\n"
17602 + " movq %%mm1, 8(%2)\n"
17603 + " movq %%mm2, 16(%2)\n"
17604 + " movq %%mm3, 24(%2)\n"
17605 + " movq 32(%1), %%mm0\n"
17606 + " movq 40(%1), %%mm1\n"
17607 + " movq 48(%1), %%mm2\n"
17608 + " movq 56(%1), %%mm3\n"
17609 + " movq %%mm0, 32(%2)\n"
17610 + " movq %%mm1, 40(%2)\n"
17611 + " movq %%mm2, 48(%2)\n"
17612 + " movq %%mm3, 56(%2)\n"
17613 ".section .fixup, \"ax\"\n"
17614 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17615 + "3:\n"
17616 +
17617 +#ifdef CONFIG_PAX_KERNEXEC
17618 + " movl %%cr0, %0\n"
17619 + " movl %0, %%eax\n"
17620 + " andl $0xFFFEFFFF, %%eax\n"
17621 + " movl %%eax, %%cr0\n"
17622 +#endif
17623 +
17624 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17625 +
17626 +#ifdef CONFIG_PAX_KERNEXEC
17627 + " movl %0, %%cr0\n"
17628 +#endif
17629 +
17630 " jmp 2b\n"
17631 ".previous\n"
17632 _ASM_EXTABLE(1b, 3b)
17633 - : : "r" (from), "r" (to) : "memory");
17634 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17635
17636 from += 64;
17637 to += 64;
17638 diff -urNp linux-2.6.39.4/arch/x86/lib/putuser.S linux-2.6.39.4/arch/x86/lib/putuser.S
17639 --- linux-2.6.39.4/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400
17640 +++ linux-2.6.39.4/arch/x86/lib/putuser.S 2011-08-05 19:44:35.000000000 -0400
17641 @@ -15,7 +15,8 @@
17642 #include <asm/thread_info.h>
17643 #include <asm/errno.h>
17644 #include <asm/asm.h>
17645 -
17646 +#include <asm/segment.h>
17647 +#include <asm/pgtable.h>
17648
17649 /*
17650 * __put_user_X
17651 @@ -29,52 +30,119 @@
17652 * as they get called from within inline assembly.
17653 */
17654
17655 -#define ENTER CFI_STARTPROC ; \
17656 - GET_THREAD_INFO(%_ASM_BX)
17657 +#define ENTER CFI_STARTPROC
17658 #define EXIT ret ; \
17659 CFI_ENDPROC
17660
17661 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17662 +#define _DEST %_ASM_CX,%_ASM_BX
17663 +#else
17664 +#define _DEST %_ASM_CX
17665 +#endif
17666 +
17667 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17668 +#define __copyuser_seg gs;
17669 +#else
17670 +#define __copyuser_seg
17671 +#endif
17672 +
17673 .text
17674 ENTRY(__put_user_1)
17675 ENTER
17676 +
17677 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17678 + GET_THREAD_INFO(%_ASM_BX)
17679 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17680 jae bad_put_user
17681 -1: movb %al,(%_ASM_CX)
17682 +
17683 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17684 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17685 + cmp %_ASM_BX,%_ASM_CX
17686 + jb 1234f
17687 + xor %ebx,%ebx
17688 +1234:
17689 +#endif
17690 +
17691 +#endif
17692 +
17693 +1: __copyuser_seg movb %al,(_DEST)
17694 xor %eax,%eax
17695 EXIT
17696 ENDPROC(__put_user_1)
17697
17698 ENTRY(__put_user_2)
17699 ENTER
17700 +
17701 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17702 + GET_THREAD_INFO(%_ASM_BX)
17703 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17704 sub $1,%_ASM_BX
17705 cmp %_ASM_BX,%_ASM_CX
17706 jae bad_put_user
17707 -2: movw %ax,(%_ASM_CX)
17708 +
17709 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17710 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17711 + cmp %_ASM_BX,%_ASM_CX
17712 + jb 1234f
17713 + xor %ebx,%ebx
17714 +1234:
17715 +#endif
17716 +
17717 +#endif
17718 +
17719 +2: __copyuser_seg movw %ax,(_DEST)
17720 xor %eax,%eax
17721 EXIT
17722 ENDPROC(__put_user_2)
17723
17724 ENTRY(__put_user_4)
17725 ENTER
17726 +
17727 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17728 + GET_THREAD_INFO(%_ASM_BX)
17729 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17730 sub $3,%_ASM_BX
17731 cmp %_ASM_BX,%_ASM_CX
17732 jae bad_put_user
17733 -3: movl %eax,(%_ASM_CX)
17734 +
17735 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17736 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17737 + cmp %_ASM_BX,%_ASM_CX
17738 + jb 1234f
17739 + xor %ebx,%ebx
17740 +1234:
17741 +#endif
17742 +
17743 +#endif
17744 +
17745 +3: __copyuser_seg movl %eax,(_DEST)
17746 xor %eax,%eax
17747 EXIT
17748 ENDPROC(__put_user_4)
17749
17750 ENTRY(__put_user_8)
17751 ENTER
17752 +
17753 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17754 + GET_THREAD_INFO(%_ASM_BX)
17755 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17756 sub $7,%_ASM_BX
17757 cmp %_ASM_BX,%_ASM_CX
17758 jae bad_put_user
17759 -4: mov %_ASM_AX,(%_ASM_CX)
17760 +
17761 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17762 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17763 + cmp %_ASM_BX,%_ASM_CX
17764 + jb 1234f
17765 + xor %ebx,%ebx
17766 +1234:
17767 +#endif
17768 +
17769 +#endif
17770 +
17771 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17772 #ifdef CONFIG_X86_32
17773 -5: movl %edx,4(%_ASM_CX)
17774 +5: __copyuser_seg movl %edx,4(_DEST)
17775 #endif
17776 xor %eax,%eax
17777 EXIT
17778 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_32.c linux-2.6.39.4/arch/x86/lib/usercopy_32.c
17779 --- linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400
17780 +++ linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-08-05 19:44:35.000000000 -0400
17781 @@ -43,7 +43,7 @@ do { \
17782 __asm__ __volatile__( \
17783 " testl %1,%1\n" \
17784 " jz 2f\n" \
17785 - "0: lodsb\n" \
17786 + "0: "__copyuser_seg"lodsb\n" \
17787 " stosb\n" \
17788 " testb %%al,%%al\n" \
17789 " jz 1f\n" \
17790 @@ -128,10 +128,12 @@ do { \
17791 int __d0; \
17792 might_fault(); \
17793 __asm__ __volatile__( \
17794 + __COPYUSER_SET_ES \
17795 "0: rep; stosl\n" \
17796 " movl %2,%0\n" \
17797 "1: rep; stosb\n" \
17798 "2:\n" \
17799 + __COPYUSER_RESTORE_ES \
17800 ".section .fixup,\"ax\"\n" \
17801 "3: lea 0(%2,%0,4),%0\n" \
17802 " jmp 2b\n" \
17803 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17804 might_fault();
17805
17806 __asm__ __volatile__(
17807 + __COPYUSER_SET_ES
17808 " testl %0, %0\n"
17809 " jz 3f\n"
17810 " andl %0,%%ecx\n"
17811 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17812 " subl %%ecx,%0\n"
17813 " addl %0,%%eax\n"
17814 "1:\n"
17815 + __COPYUSER_RESTORE_ES
17816 ".section .fixup,\"ax\"\n"
17817 "2: xorl %%eax,%%eax\n"
17818 " jmp 1b\n"
17819 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17820
17821 #ifdef CONFIG_X86_INTEL_USERCOPY
17822 static unsigned long
17823 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17824 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17825 {
17826 int d0, d1;
17827 __asm__ __volatile__(
17828 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17829 " .align 2,0x90\n"
17830 "3: movl 0(%4), %%eax\n"
17831 "4: movl 4(%4), %%edx\n"
17832 - "5: movl %%eax, 0(%3)\n"
17833 - "6: movl %%edx, 4(%3)\n"
17834 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17835 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17836 "7: movl 8(%4), %%eax\n"
17837 "8: movl 12(%4),%%edx\n"
17838 - "9: movl %%eax, 8(%3)\n"
17839 - "10: movl %%edx, 12(%3)\n"
17840 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17841 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17842 "11: movl 16(%4), %%eax\n"
17843 "12: movl 20(%4), %%edx\n"
17844 - "13: movl %%eax, 16(%3)\n"
17845 - "14: movl %%edx, 20(%3)\n"
17846 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17847 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17848 "15: movl 24(%4), %%eax\n"
17849 "16: movl 28(%4), %%edx\n"
17850 - "17: movl %%eax, 24(%3)\n"
17851 - "18: movl %%edx, 28(%3)\n"
17852 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17853 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17854 "19: movl 32(%4), %%eax\n"
17855 "20: movl 36(%4), %%edx\n"
17856 - "21: movl %%eax, 32(%3)\n"
17857 - "22: movl %%edx, 36(%3)\n"
17858 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17859 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17860 "23: movl 40(%4), %%eax\n"
17861 "24: movl 44(%4), %%edx\n"
17862 - "25: movl %%eax, 40(%3)\n"
17863 - "26: movl %%edx, 44(%3)\n"
17864 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17865 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17866 "27: movl 48(%4), %%eax\n"
17867 "28: movl 52(%4), %%edx\n"
17868 - "29: movl %%eax, 48(%3)\n"
17869 - "30: movl %%edx, 52(%3)\n"
17870 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17871 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17872 "31: movl 56(%4), %%eax\n"
17873 "32: movl 60(%4), %%edx\n"
17874 - "33: movl %%eax, 56(%3)\n"
17875 - "34: movl %%edx, 60(%3)\n"
17876 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17877 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17878 " addl $-64, %0\n"
17879 " addl $64, %4\n"
17880 " addl $64, %3\n"
17881 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17882 " shrl $2, %0\n"
17883 " andl $3, %%eax\n"
17884 " cld\n"
17885 + __COPYUSER_SET_ES
17886 "99: rep; movsl\n"
17887 "36: movl %%eax, %0\n"
17888 "37: rep; movsb\n"
17889 "100:\n"
17890 + __COPYUSER_RESTORE_ES
17891 + ".section .fixup,\"ax\"\n"
17892 + "101: lea 0(%%eax,%0,4),%0\n"
17893 + " jmp 100b\n"
17894 + ".previous\n"
17895 + ".section __ex_table,\"a\"\n"
17896 + " .align 4\n"
17897 + " .long 1b,100b\n"
17898 + " .long 2b,100b\n"
17899 + " .long 3b,100b\n"
17900 + " .long 4b,100b\n"
17901 + " .long 5b,100b\n"
17902 + " .long 6b,100b\n"
17903 + " .long 7b,100b\n"
17904 + " .long 8b,100b\n"
17905 + " .long 9b,100b\n"
17906 + " .long 10b,100b\n"
17907 + " .long 11b,100b\n"
17908 + " .long 12b,100b\n"
17909 + " .long 13b,100b\n"
17910 + " .long 14b,100b\n"
17911 + " .long 15b,100b\n"
17912 + " .long 16b,100b\n"
17913 + " .long 17b,100b\n"
17914 + " .long 18b,100b\n"
17915 + " .long 19b,100b\n"
17916 + " .long 20b,100b\n"
17917 + " .long 21b,100b\n"
17918 + " .long 22b,100b\n"
17919 + " .long 23b,100b\n"
17920 + " .long 24b,100b\n"
17921 + " .long 25b,100b\n"
17922 + " .long 26b,100b\n"
17923 + " .long 27b,100b\n"
17924 + " .long 28b,100b\n"
17925 + " .long 29b,100b\n"
17926 + " .long 30b,100b\n"
17927 + " .long 31b,100b\n"
17928 + " .long 32b,100b\n"
17929 + " .long 33b,100b\n"
17930 + " .long 34b,100b\n"
17931 + " .long 35b,100b\n"
17932 + " .long 36b,100b\n"
17933 + " .long 37b,100b\n"
17934 + " .long 99b,101b\n"
17935 + ".previous"
17936 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17937 + : "1"(to), "2"(from), "0"(size)
17938 + : "eax", "edx", "memory");
17939 + return size;
17940 +}
17941 +
17942 +static unsigned long
17943 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17944 +{
17945 + int d0, d1;
17946 + __asm__ __volatile__(
17947 + " .align 2,0x90\n"
17948 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17949 + " cmpl $67, %0\n"
17950 + " jbe 3f\n"
17951 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17952 + " .align 2,0x90\n"
17953 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17954 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17955 + "5: movl %%eax, 0(%3)\n"
17956 + "6: movl %%edx, 4(%3)\n"
17957 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17958 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17959 + "9: movl %%eax, 8(%3)\n"
17960 + "10: movl %%edx, 12(%3)\n"
17961 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17962 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17963 + "13: movl %%eax, 16(%3)\n"
17964 + "14: movl %%edx, 20(%3)\n"
17965 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17966 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17967 + "17: movl %%eax, 24(%3)\n"
17968 + "18: movl %%edx, 28(%3)\n"
17969 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17970 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17971 + "21: movl %%eax, 32(%3)\n"
17972 + "22: movl %%edx, 36(%3)\n"
17973 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17974 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17975 + "25: movl %%eax, 40(%3)\n"
17976 + "26: movl %%edx, 44(%3)\n"
17977 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17978 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17979 + "29: movl %%eax, 48(%3)\n"
17980 + "30: movl %%edx, 52(%3)\n"
17981 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17982 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17983 + "33: movl %%eax, 56(%3)\n"
17984 + "34: movl %%edx, 60(%3)\n"
17985 + " addl $-64, %0\n"
17986 + " addl $64, %4\n"
17987 + " addl $64, %3\n"
17988 + " cmpl $63, %0\n"
17989 + " ja 1b\n"
17990 + "35: movl %0, %%eax\n"
17991 + " shrl $2, %0\n"
17992 + " andl $3, %%eax\n"
17993 + " cld\n"
17994 + "99: rep; "__copyuser_seg" movsl\n"
17995 + "36: movl %%eax, %0\n"
17996 + "37: rep; "__copyuser_seg" movsb\n"
17997 + "100:\n"
17998 ".section .fixup,\"ax\"\n"
17999 "101: lea 0(%%eax,%0,4),%0\n"
18000 " jmp 100b\n"
18001 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18002 int d0, d1;
18003 __asm__ __volatile__(
18004 " .align 2,0x90\n"
18005 - "0: movl 32(%4), %%eax\n"
18006 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18007 " cmpl $67, %0\n"
18008 " jbe 2f\n"
18009 - "1: movl 64(%4), %%eax\n"
18010 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18011 " .align 2,0x90\n"
18012 - "2: movl 0(%4), %%eax\n"
18013 - "21: movl 4(%4), %%edx\n"
18014 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18015 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18016 " movl %%eax, 0(%3)\n"
18017 " movl %%edx, 4(%3)\n"
18018 - "3: movl 8(%4), %%eax\n"
18019 - "31: movl 12(%4),%%edx\n"
18020 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18021 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18022 " movl %%eax, 8(%3)\n"
18023 " movl %%edx, 12(%3)\n"
18024 - "4: movl 16(%4), %%eax\n"
18025 - "41: movl 20(%4), %%edx\n"
18026 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18027 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18028 " movl %%eax, 16(%3)\n"
18029 " movl %%edx, 20(%3)\n"
18030 - "10: movl 24(%4), %%eax\n"
18031 - "51: movl 28(%4), %%edx\n"
18032 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18033 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18034 " movl %%eax, 24(%3)\n"
18035 " movl %%edx, 28(%3)\n"
18036 - "11: movl 32(%4), %%eax\n"
18037 - "61: movl 36(%4), %%edx\n"
18038 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18039 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18040 " movl %%eax, 32(%3)\n"
18041 " movl %%edx, 36(%3)\n"
18042 - "12: movl 40(%4), %%eax\n"
18043 - "71: movl 44(%4), %%edx\n"
18044 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18045 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18046 " movl %%eax, 40(%3)\n"
18047 " movl %%edx, 44(%3)\n"
18048 - "13: movl 48(%4), %%eax\n"
18049 - "81: movl 52(%4), %%edx\n"
18050 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18051 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18052 " movl %%eax, 48(%3)\n"
18053 " movl %%edx, 52(%3)\n"
18054 - "14: movl 56(%4), %%eax\n"
18055 - "91: movl 60(%4), %%edx\n"
18056 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18057 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18058 " movl %%eax, 56(%3)\n"
18059 " movl %%edx, 60(%3)\n"
18060 " addl $-64, %0\n"
18061 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18062 " shrl $2, %0\n"
18063 " andl $3, %%eax\n"
18064 " cld\n"
18065 - "6: rep; movsl\n"
18066 + "6: rep; "__copyuser_seg" movsl\n"
18067 " movl %%eax,%0\n"
18068 - "7: rep; movsb\n"
18069 + "7: rep; "__copyuser_seg" movsb\n"
18070 "8:\n"
18071 ".section .fixup,\"ax\"\n"
18072 "9: lea 0(%%eax,%0,4),%0\n"
18073 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18074
18075 __asm__ __volatile__(
18076 " .align 2,0x90\n"
18077 - "0: movl 32(%4), %%eax\n"
18078 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18079 " cmpl $67, %0\n"
18080 " jbe 2f\n"
18081 - "1: movl 64(%4), %%eax\n"
18082 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18083 " .align 2,0x90\n"
18084 - "2: movl 0(%4), %%eax\n"
18085 - "21: movl 4(%4), %%edx\n"
18086 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18087 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18088 " movnti %%eax, 0(%3)\n"
18089 " movnti %%edx, 4(%3)\n"
18090 - "3: movl 8(%4), %%eax\n"
18091 - "31: movl 12(%4),%%edx\n"
18092 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18093 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18094 " movnti %%eax, 8(%3)\n"
18095 " movnti %%edx, 12(%3)\n"
18096 - "4: movl 16(%4), %%eax\n"
18097 - "41: movl 20(%4), %%edx\n"
18098 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18099 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18100 " movnti %%eax, 16(%3)\n"
18101 " movnti %%edx, 20(%3)\n"
18102 - "10: movl 24(%4), %%eax\n"
18103 - "51: movl 28(%4), %%edx\n"
18104 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18105 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18106 " movnti %%eax, 24(%3)\n"
18107 " movnti %%edx, 28(%3)\n"
18108 - "11: movl 32(%4), %%eax\n"
18109 - "61: movl 36(%4), %%edx\n"
18110 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18111 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18112 " movnti %%eax, 32(%3)\n"
18113 " movnti %%edx, 36(%3)\n"
18114 - "12: movl 40(%4), %%eax\n"
18115 - "71: movl 44(%4), %%edx\n"
18116 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18117 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18118 " movnti %%eax, 40(%3)\n"
18119 " movnti %%edx, 44(%3)\n"
18120 - "13: movl 48(%4), %%eax\n"
18121 - "81: movl 52(%4), %%edx\n"
18122 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18123 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18124 " movnti %%eax, 48(%3)\n"
18125 " movnti %%edx, 52(%3)\n"
18126 - "14: movl 56(%4), %%eax\n"
18127 - "91: movl 60(%4), %%edx\n"
18128 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18129 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18130 " movnti %%eax, 56(%3)\n"
18131 " movnti %%edx, 60(%3)\n"
18132 " addl $-64, %0\n"
18133 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18134 " shrl $2, %0\n"
18135 " andl $3, %%eax\n"
18136 " cld\n"
18137 - "6: rep; movsl\n"
18138 + "6: rep; "__copyuser_seg" movsl\n"
18139 " movl %%eax,%0\n"
18140 - "7: rep; movsb\n"
18141 + "7: rep; "__copyuser_seg" movsb\n"
18142 "8:\n"
18143 ".section .fixup,\"ax\"\n"
18144 "9: lea 0(%%eax,%0,4),%0\n"
18145 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18146
18147 __asm__ __volatile__(
18148 " .align 2,0x90\n"
18149 - "0: movl 32(%4), %%eax\n"
18150 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18151 " cmpl $67, %0\n"
18152 " jbe 2f\n"
18153 - "1: movl 64(%4), %%eax\n"
18154 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18155 " .align 2,0x90\n"
18156 - "2: movl 0(%4), %%eax\n"
18157 - "21: movl 4(%4), %%edx\n"
18158 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18159 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18160 " movnti %%eax, 0(%3)\n"
18161 " movnti %%edx, 4(%3)\n"
18162 - "3: movl 8(%4), %%eax\n"
18163 - "31: movl 12(%4),%%edx\n"
18164 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18165 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18166 " movnti %%eax, 8(%3)\n"
18167 " movnti %%edx, 12(%3)\n"
18168 - "4: movl 16(%4), %%eax\n"
18169 - "41: movl 20(%4), %%edx\n"
18170 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18171 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18172 " movnti %%eax, 16(%3)\n"
18173 " movnti %%edx, 20(%3)\n"
18174 - "10: movl 24(%4), %%eax\n"
18175 - "51: movl 28(%4), %%edx\n"
18176 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18177 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18178 " movnti %%eax, 24(%3)\n"
18179 " movnti %%edx, 28(%3)\n"
18180 - "11: movl 32(%4), %%eax\n"
18181 - "61: movl 36(%4), %%edx\n"
18182 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18183 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18184 " movnti %%eax, 32(%3)\n"
18185 " movnti %%edx, 36(%3)\n"
18186 - "12: movl 40(%4), %%eax\n"
18187 - "71: movl 44(%4), %%edx\n"
18188 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18189 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18190 " movnti %%eax, 40(%3)\n"
18191 " movnti %%edx, 44(%3)\n"
18192 - "13: movl 48(%4), %%eax\n"
18193 - "81: movl 52(%4), %%edx\n"
18194 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18195 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18196 " movnti %%eax, 48(%3)\n"
18197 " movnti %%edx, 52(%3)\n"
18198 - "14: movl 56(%4), %%eax\n"
18199 - "91: movl 60(%4), %%edx\n"
18200 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18201 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18202 " movnti %%eax, 56(%3)\n"
18203 " movnti %%edx, 60(%3)\n"
18204 " addl $-64, %0\n"
18205 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18206 " shrl $2, %0\n"
18207 " andl $3, %%eax\n"
18208 " cld\n"
18209 - "6: rep; movsl\n"
18210 + "6: rep; "__copyuser_seg" movsl\n"
18211 " movl %%eax,%0\n"
18212 - "7: rep; movsb\n"
18213 + "7: rep; "__copyuser_seg" movsb\n"
18214 "8:\n"
18215 ".section .fixup,\"ax\"\n"
18216 "9: lea 0(%%eax,%0,4),%0\n"
18217 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18218 */
18219 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18220 unsigned long size);
18221 -unsigned long __copy_user_intel(void __user *to, const void *from,
18222 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18223 + unsigned long size);
18224 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18225 unsigned long size);
18226 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18227 const void __user *from, unsigned long size);
18228 #endif /* CONFIG_X86_INTEL_USERCOPY */
18229
18230 /* Generic arbitrary sized copy. */
18231 -#define __copy_user(to, from, size) \
18232 +#define __copy_user(to, from, size, prefix, set, restore) \
18233 do { \
18234 int __d0, __d1, __d2; \
18235 __asm__ __volatile__( \
18236 + set \
18237 " cmp $7,%0\n" \
18238 " jbe 1f\n" \
18239 " movl %1,%0\n" \
18240 " negl %0\n" \
18241 " andl $7,%0\n" \
18242 " subl %0,%3\n" \
18243 - "4: rep; movsb\n" \
18244 + "4: rep; "prefix"movsb\n" \
18245 " movl %3,%0\n" \
18246 " shrl $2,%0\n" \
18247 " andl $3,%3\n" \
18248 " .align 2,0x90\n" \
18249 - "0: rep; movsl\n" \
18250 + "0: rep; "prefix"movsl\n" \
18251 " movl %3,%0\n" \
18252 - "1: rep; movsb\n" \
18253 + "1: rep; "prefix"movsb\n" \
18254 "2:\n" \
18255 + restore \
18256 ".section .fixup,\"ax\"\n" \
18257 "5: addl %3,%0\n" \
18258 " jmp 2b\n" \
18259 @@ -682,14 +799,14 @@ do { \
18260 " negl %0\n" \
18261 " andl $7,%0\n" \
18262 " subl %0,%3\n" \
18263 - "4: rep; movsb\n" \
18264 + "4: rep; "__copyuser_seg"movsb\n" \
18265 " movl %3,%0\n" \
18266 " shrl $2,%0\n" \
18267 " andl $3,%3\n" \
18268 " .align 2,0x90\n" \
18269 - "0: rep; movsl\n" \
18270 + "0: rep; "__copyuser_seg"movsl\n" \
18271 " movl %3,%0\n" \
18272 - "1: rep; movsb\n" \
18273 + "1: rep; "__copyuser_seg"movsb\n" \
18274 "2:\n" \
18275 ".section .fixup,\"ax\"\n" \
18276 "5: addl %3,%0\n" \
18277 @@ -775,9 +892,9 @@ survive:
18278 }
18279 #endif
18280 if (movsl_is_ok(to, from, n))
18281 - __copy_user(to, from, n);
18282 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18283 else
18284 - n = __copy_user_intel(to, from, n);
18285 + n = __generic_copy_to_user_intel(to, from, n);
18286 return n;
18287 }
18288 EXPORT_SYMBOL(__copy_to_user_ll);
18289 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18290 unsigned long n)
18291 {
18292 if (movsl_is_ok(to, from, n))
18293 - __copy_user(to, from, n);
18294 + __copy_user(to, from, n, __copyuser_seg, "", "");
18295 else
18296 - n = __copy_user_intel((void __user *)to,
18297 - (const void *)from, n);
18298 + n = __generic_copy_from_user_intel(to, from, n);
18299 return n;
18300 }
18301 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18302 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18303 if (n > 64 && cpu_has_xmm2)
18304 n = __copy_user_intel_nocache(to, from, n);
18305 else
18306 - __copy_user(to, from, n);
18307 + __copy_user(to, from, n, __copyuser_seg, "", "");
18308 #else
18309 - __copy_user(to, from, n);
18310 + __copy_user(to, from, n, __copyuser_seg, "", "");
18311 #endif
18312 return n;
18313 }
18314 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18315
18316 -/**
18317 - * copy_to_user: - Copy a block of data into user space.
18318 - * @to: Destination address, in user space.
18319 - * @from: Source address, in kernel space.
18320 - * @n: Number of bytes to copy.
18321 - *
18322 - * Context: User context only. This function may sleep.
18323 - *
18324 - * Copy data from kernel space to user space.
18325 - *
18326 - * Returns number of bytes that could not be copied.
18327 - * On success, this will be zero.
18328 - */
18329 -unsigned long
18330 -copy_to_user(void __user *to, const void *from, unsigned long n)
18331 +void copy_from_user_overflow(void)
18332 {
18333 - if (access_ok(VERIFY_WRITE, to, n))
18334 - n = __copy_to_user(to, from, n);
18335 - return n;
18336 + WARN(1, "Buffer overflow detected!\n");
18337 }
18338 -EXPORT_SYMBOL(copy_to_user);
18339 +EXPORT_SYMBOL(copy_from_user_overflow);
18340
18341 -/**
18342 - * copy_from_user: - Copy a block of data from user space.
18343 - * @to: Destination address, in kernel space.
18344 - * @from: Source address, in user space.
18345 - * @n: Number of bytes to copy.
18346 - *
18347 - * Context: User context only. This function may sleep.
18348 - *
18349 - * Copy data from user space to kernel space.
18350 - *
18351 - * Returns number of bytes that could not be copied.
18352 - * On success, this will be zero.
18353 - *
18354 - * If some data could not be copied, this function will pad the copied
18355 - * data to the requested size using zero bytes.
18356 - */
18357 -unsigned long
18358 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18359 +void copy_to_user_overflow(void)
18360 {
18361 - if (access_ok(VERIFY_READ, from, n))
18362 - n = __copy_from_user(to, from, n);
18363 - else
18364 - memset(to, 0, n);
18365 - return n;
18366 + WARN(1, "Buffer overflow detected!\n");
18367 }
18368 -EXPORT_SYMBOL(_copy_from_user);
18369 +EXPORT_SYMBOL(copy_to_user_overflow);
18370
18371 -void copy_from_user_overflow(void)
18372 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18373 +void __set_fs(mm_segment_t x)
18374 {
18375 - WARN(1, "Buffer overflow detected!\n");
18376 + switch (x.seg) {
18377 + case 0:
18378 + loadsegment(gs, 0);
18379 + break;
18380 + case TASK_SIZE_MAX:
18381 + loadsegment(gs, __USER_DS);
18382 + break;
18383 + case -1UL:
18384 + loadsegment(gs, __KERNEL_DS);
18385 + break;
18386 + default:
18387 + BUG();
18388 + }
18389 + return;
18390 }
18391 -EXPORT_SYMBOL(copy_from_user_overflow);
18392 +EXPORT_SYMBOL(__set_fs);
18393 +
18394 +void set_fs(mm_segment_t x)
18395 +{
18396 + current_thread_info()->addr_limit = x;
18397 + __set_fs(x);
18398 +}
18399 +EXPORT_SYMBOL(set_fs);
18400 +#endif
18401 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_64.c linux-2.6.39.4/arch/x86/lib/usercopy_64.c
18402 --- linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
18403 +++ linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-08-05 19:44:35.000000000 -0400
18404 @@ -42,6 +42,12 @@ long
18405 __strncpy_from_user(char *dst, const char __user *src, long count)
18406 {
18407 long res;
18408 +
18409 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18410 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18411 + src += PAX_USER_SHADOW_BASE;
18412 +#endif
18413 +
18414 __do_strncpy_from_user(dst, src, count, res);
18415 return res;
18416 }
18417 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18418 {
18419 long __d0;
18420 might_fault();
18421 +
18422 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18423 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18424 + addr += PAX_USER_SHADOW_BASE;
18425 +#endif
18426 +
18427 /* no memory constraint because it doesn't change any memory gcc knows
18428 about */
18429 asm volatile(
18430 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18431
18432 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18433 {
18434 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18435 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18436 +
18437 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18438 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18439 + to += PAX_USER_SHADOW_BASE;
18440 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18441 + from += PAX_USER_SHADOW_BASE;
18442 +#endif
18443 +
18444 return copy_user_generic((__force void *)to, (__force void *)from, len);
18445 - }
18446 - return len;
18447 + }
18448 + return len;
18449 }
18450 EXPORT_SYMBOL(copy_in_user);
18451
18452 diff -urNp linux-2.6.39.4/arch/x86/Makefile linux-2.6.39.4/arch/x86/Makefile
18453 --- linux-2.6.39.4/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400
18454 +++ linux-2.6.39.4/arch/x86/Makefile 2011-08-05 19:44:35.000000000 -0400
18455 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18456 else
18457 BITS := 64
18458 UTS_MACHINE := x86_64
18459 + biarch := $(call cc-option,-m64)
18460 CHECKFLAGS += -D__x86_64__ -m64
18461
18462 KBUILD_AFLAGS += -m64
18463 @@ -195,3 +196,12 @@ define archhelp
18464 echo ' FDARGS="..." arguments for the booted kernel'
18465 echo ' FDINITRD=file initrd for the booted kernel'
18466 endef
18467 +
18468 +define OLD_LD
18469 +
18470 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18471 +*** Please upgrade your binutils to 2.18 or newer
18472 +endef
18473 +
18474 +archprepare:
18475 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18476 diff -urNp linux-2.6.39.4/arch/x86/mm/extable.c linux-2.6.39.4/arch/x86/mm/extable.c
18477 --- linux-2.6.39.4/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400
18478 +++ linux-2.6.39.4/arch/x86/mm/extable.c 2011-08-05 19:44:35.000000000 -0400
18479 @@ -1,14 +1,71 @@
18480 #include <linux/module.h>
18481 #include <linux/spinlock.h>
18482 +#include <linux/sort.h>
18483 #include <asm/uaccess.h>
18484 +#include <asm/pgtable.h>
18485
18486 +/*
18487 + * The exception table needs to be sorted so that the binary
18488 + * search that we use to find entries in it works properly.
18489 + * This is used both for the kernel exception table and for
18490 + * the exception tables of modules that get loaded.
18491 + */
18492 +static int cmp_ex(const void *a, const void *b)
18493 +{
18494 + const struct exception_table_entry *x = a, *y = b;
18495 +
18496 + /* avoid overflow */
18497 + if (x->insn > y->insn)
18498 + return 1;
18499 + if (x->insn < y->insn)
18500 + return -1;
18501 + return 0;
18502 +}
18503 +
18504 +static void swap_ex(void *a, void *b, int size)
18505 +{
18506 + struct exception_table_entry t, *x = a, *y = b;
18507 +
18508 + t = *x;
18509 +
18510 + pax_open_kernel();
18511 + *x = *y;
18512 + *y = t;
18513 + pax_close_kernel();
18514 +}
18515 +
18516 +void sort_extable(struct exception_table_entry *start,
18517 + struct exception_table_entry *finish)
18518 +{
18519 + sort(start, finish - start, sizeof(struct exception_table_entry),
18520 + cmp_ex, swap_ex);
18521 +}
18522 +
18523 +#ifdef CONFIG_MODULES
18524 +/*
18525 + * If the exception table is sorted, any referring to the module init
18526 + * will be at the beginning or the end.
18527 + */
18528 +void trim_init_extable(struct module *m)
18529 +{
18530 + /*trim the beginning*/
18531 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
18532 + m->extable++;
18533 + m->num_exentries--;
18534 + }
18535 + /*trim the end*/
18536 + while (m->num_exentries &&
18537 + within_module_init(m->extable[m->num_exentries-1].insn, m))
18538 + m->num_exentries--;
18539 +}
18540 +#endif /* CONFIG_MODULES */
18541
18542 int fixup_exception(struct pt_regs *regs)
18543 {
18544 const struct exception_table_entry *fixup;
18545
18546 #ifdef CONFIG_PNPBIOS
18547 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18548 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18549 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18550 extern u32 pnp_bios_is_utter_crap;
18551 pnp_bios_is_utter_crap = 1;
18552 diff -urNp linux-2.6.39.4/arch/x86/mm/fault.c linux-2.6.39.4/arch/x86/mm/fault.c
18553 --- linux-2.6.39.4/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
18554 +++ linux-2.6.39.4/arch/x86/mm/fault.c 2011-08-17 20:06:06.000000000 -0400
18555 @@ -12,10 +12,18 @@
18556 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
18557 #include <linux/perf_event.h> /* perf_sw_event */
18558 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18559 +#include <linux/unistd.h>
18560 +#include <linux/compiler.h>
18561
18562 #include <asm/traps.h> /* dotraplinkage, ... */
18563 #include <asm/pgalloc.h> /* pgd_*(), ... */
18564 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18565 +#include <asm/vsyscall.h>
18566 +#include <asm/tlbflush.h>
18567 +
18568 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18569 +#include <asm/stacktrace.h>
18570 +#endif
18571
18572 /*
18573 * Page fault error code bits:
18574 @@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
18575 int ret = 0;
18576
18577 /* kprobe_running() needs smp_processor_id() */
18578 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18579 + if (kprobes_built_in() && !user_mode(regs)) {
18580 preempt_disable();
18581 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18582 ret = 1;
18583 @@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
18584 return !instr_lo || (instr_lo>>1) == 1;
18585 case 0x00:
18586 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18587 - if (probe_kernel_address(instr, opcode))
18588 + if (user_mode(regs)) {
18589 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18590 + return 0;
18591 + } else if (probe_kernel_address(instr, opcode))
18592 return 0;
18593
18594 *prefetch = (instr_lo == 0xF) &&
18595 @@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
18596 while (instr < max_instr) {
18597 unsigned char opcode;
18598
18599 - if (probe_kernel_address(instr, opcode))
18600 + if (user_mode(regs)) {
18601 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18602 + break;
18603 + } else if (probe_kernel_address(instr, opcode))
18604 break;
18605
18606 instr++;
18607 @@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
18608 force_sig_info(si_signo, &info, tsk);
18609 }
18610
18611 +#ifdef CONFIG_PAX_EMUTRAMP
18612 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18613 +#endif
18614 +
18615 +#ifdef CONFIG_PAX_PAGEEXEC
18616 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18617 +{
18618 + pgd_t *pgd;
18619 + pud_t *pud;
18620 + pmd_t *pmd;
18621 +
18622 + pgd = pgd_offset(mm, address);
18623 + if (!pgd_present(*pgd))
18624 + return NULL;
18625 + pud = pud_offset(pgd, address);
18626 + if (!pud_present(*pud))
18627 + return NULL;
18628 + pmd = pmd_offset(pud, address);
18629 + if (!pmd_present(*pmd))
18630 + return NULL;
18631 + return pmd;
18632 +}
18633 +#endif
18634 +
18635 DEFINE_SPINLOCK(pgd_lock);
18636 LIST_HEAD(pgd_list);
18637
18638 @@ -229,10 +267,22 @@ void vmalloc_sync_all(void)
18639 for (address = VMALLOC_START & PMD_MASK;
18640 address >= TASK_SIZE && address < FIXADDR_TOP;
18641 address += PMD_SIZE) {
18642 +
18643 +#ifdef CONFIG_PAX_PER_CPU_PGD
18644 + unsigned long cpu;
18645 +#else
18646 struct page *page;
18647 +#endif
18648
18649 spin_lock(&pgd_lock);
18650 +
18651 +#ifdef CONFIG_PAX_PER_CPU_PGD
18652 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18653 + pgd_t *pgd = get_cpu_pgd(cpu);
18654 + pmd_t *ret;
18655 +#else
18656 list_for_each_entry(page, &pgd_list, lru) {
18657 + pgd_t *pgd = page_address(page);
18658 spinlock_t *pgt_lock;
18659 pmd_t *ret;
18660
18661 @@ -240,8 +290,13 @@ void vmalloc_sync_all(void)
18662 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18663
18664 spin_lock(pgt_lock);
18665 - ret = vmalloc_sync_one(page_address(page), address);
18666 +#endif
18667 +
18668 + ret = vmalloc_sync_one(pgd, address);
18669 +
18670 +#ifndef CONFIG_PAX_PER_CPU_PGD
18671 spin_unlock(pgt_lock);
18672 +#endif
18673
18674 if (!ret)
18675 break;
18676 @@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa
18677 * an interrupt in the middle of a task switch..
18678 */
18679 pgd_paddr = read_cr3();
18680 +
18681 +#ifdef CONFIG_PAX_PER_CPU_PGD
18682 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18683 +#endif
18684 +
18685 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18686 if (!pmd_k)
18687 return -1;
18688 @@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa
18689 * happen within a race in page table update. In the later
18690 * case just flush:
18691 */
18692 +
18693 +#ifdef CONFIG_PAX_PER_CPU_PGD
18694 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18695 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18696 +#else
18697 pgd = pgd_offset(current->active_mm, address);
18698 +#endif
18699 +
18700 pgd_ref = pgd_offset_k(address);
18701 if (pgd_none(*pgd_ref))
18702 return -1;
18703 @@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r
18704 static int is_errata100(struct pt_regs *regs, unsigned long address)
18705 {
18706 #ifdef CONFIG_X86_64
18707 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18708 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18709 return 1;
18710 #endif
18711 return 0;
18712 @@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r
18713 }
18714
18715 static const char nx_warning[] = KERN_CRIT
18716 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18717 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18718
18719 static void
18720 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18721 @@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un
18722 if (!oops_may_print())
18723 return;
18724
18725 - if (error_code & PF_INSTR) {
18726 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18727 unsigned int level;
18728
18729 pte_t *pte = lookup_address(address, &level);
18730
18731 if (pte && pte_present(*pte) && !pte_exec(*pte))
18732 - printk(nx_warning, current_uid());
18733 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18734 }
18735
18736 +#ifdef CONFIG_PAX_KERNEXEC
18737 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18738 + if (current->signal->curr_ip)
18739 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18740 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18741 + else
18742 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18743 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18744 + }
18745 +#endif
18746 +
18747 printk(KERN_ALERT "BUG: unable to handle kernel ");
18748 if (address < PAGE_SIZE)
18749 printk(KERN_CONT "NULL pointer dereference");
18750 @@ -701,6 +779,70 @@ __bad_area_nosemaphore(struct pt_regs *r
18751 unsigned long address, int si_code)
18752 {
18753 struct task_struct *tsk = current;
18754 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18755 + struct mm_struct *mm = tsk->mm;
18756 +#endif
18757 +
18758 +#ifdef CONFIG_X86_64
18759 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18760 + if (regs->ip == (unsigned long)vgettimeofday) {
18761 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
18762 + return;
18763 + } else if (regs->ip == (unsigned long)vtime) {
18764 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
18765 + return;
18766 + } else if (regs->ip == (unsigned long)vgetcpu) {
18767 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
18768 + return;
18769 + }
18770 + }
18771 +#endif
18772 +
18773 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18774 + if (mm && (error_code & PF_USER)) {
18775 + unsigned long ip = regs->ip;
18776 +
18777 + if (v8086_mode(regs))
18778 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18779 +
18780 + /*
18781 + * It's possible to have interrupts off here:
18782 + */
18783 + local_irq_enable();
18784 +
18785 +#ifdef CONFIG_PAX_PAGEEXEC
18786 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18787 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18788 +
18789 +#ifdef CONFIG_PAX_EMUTRAMP
18790 + switch (pax_handle_fetch_fault(regs)) {
18791 + case 2:
18792 + return;
18793 + }
18794 +#endif
18795 +
18796 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18797 + do_group_exit(SIGKILL);
18798 + }
18799 +#endif
18800 +
18801 +#ifdef CONFIG_PAX_SEGMEXEC
18802 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18803 +
18804 +#ifdef CONFIG_PAX_EMUTRAMP
18805 + switch (pax_handle_fetch_fault(regs)) {
18806 + case 2:
18807 + return;
18808 + }
18809 +#endif
18810 +
18811 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18812 + do_group_exit(SIGKILL);
18813 + }
18814 +#endif
18815 +
18816 + }
18817 +#endif
18818
18819 /* User mode accesses just cause a SIGSEGV */
18820 if (error_code & PF_USER) {
18821 @@ -855,6 +997,99 @@ static int spurious_fault_check(unsigned
18822 return 1;
18823 }
18824
18825 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18826 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18827 +{
18828 + pte_t *pte;
18829 + pmd_t *pmd;
18830 + spinlock_t *ptl;
18831 + unsigned char pte_mask;
18832 +
18833 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18834 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18835 + return 0;
18836 +
18837 + /* PaX: it's our fault, let's handle it if we can */
18838 +
18839 + /* PaX: take a look at read faults before acquiring any locks */
18840 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18841 + /* instruction fetch attempt from a protected page in user mode */
18842 + up_read(&mm->mmap_sem);
18843 +
18844 +#ifdef CONFIG_PAX_EMUTRAMP
18845 + switch (pax_handle_fetch_fault(regs)) {
18846 + case 2:
18847 + return 1;
18848 + }
18849 +#endif
18850 +
18851 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18852 + do_group_exit(SIGKILL);
18853 + }
18854 +
18855 + pmd = pax_get_pmd(mm, address);
18856 + if (unlikely(!pmd))
18857 + return 0;
18858 +
18859 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18860 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18861 + pte_unmap_unlock(pte, ptl);
18862 + return 0;
18863 + }
18864 +
18865 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18866 + /* write attempt to a protected page in user mode */
18867 + pte_unmap_unlock(pte, ptl);
18868 + return 0;
18869 + }
18870 +
18871 +#ifdef CONFIG_SMP
18872 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18873 +#else
18874 + if (likely(address > get_limit(regs->cs)))
18875 +#endif
18876 + {
18877 + set_pte(pte, pte_mkread(*pte));
18878 + __flush_tlb_one(address);
18879 + pte_unmap_unlock(pte, ptl);
18880 + up_read(&mm->mmap_sem);
18881 + return 1;
18882 + }
18883 +
18884 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18885 +
18886 + /*
18887 + * PaX: fill DTLB with user rights and retry
18888 + */
18889 + __asm__ __volatile__ (
18890 + "orb %2,(%1)\n"
18891 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18892 +/*
18893 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18894 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18895 + * page fault when examined during a TLB load attempt. this is true not only
18896 + * for PTEs holding a non-present entry but also present entries that will
18897 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18898 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18899 + * for our target pages since their PTEs are simply not in the TLBs at all.
18900 +
18901 + * the best thing in omitting it is that we gain around 15-20% speed in the
18902 + * fast path of the page fault handler and can get rid of tracing since we
18903 + * can no longer flush unintended entries.
18904 + */
18905 + "invlpg (%0)\n"
18906 +#endif
18907 + __copyuser_seg"testb $0,(%0)\n"
18908 + "xorb %3,(%1)\n"
18909 + :
18910 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18911 + : "memory", "cc");
18912 + pte_unmap_unlock(pte, ptl);
18913 + up_read(&mm->mmap_sem);
18914 + return 1;
18915 +}
18916 +#endif
18917 +
18918 /*
18919 * Handle a spurious fault caused by a stale TLB entry.
18920 *
18921 @@ -927,6 +1162,9 @@ int show_unhandled_signals = 1;
18922 static inline int
18923 access_error(unsigned long error_code, struct vm_area_struct *vma)
18924 {
18925 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18926 + return 1;
18927 +
18928 if (error_code & PF_WRITE) {
18929 /* write, present and write, not present: */
18930 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18931 @@ -960,19 +1198,33 @@ do_page_fault(struct pt_regs *regs, unsi
18932 {
18933 struct vm_area_struct *vma;
18934 struct task_struct *tsk;
18935 - unsigned long address;
18936 struct mm_struct *mm;
18937 int fault;
18938 int write = error_code & PF_WRITE;
18939 unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
18940 (write ? FAULT_FLAG_WRITE : 0);
18941
18942 + /* Get the faulting address: */
18943 + unsigned long address = read_cr2();
18944 +
18945 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18946 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18947 + if (!search_exception_tables(regs->ip)) {
18948 + bad_area_nosemaphore(regs, error_code, address);
18949 + return;
18950 + }
18951 + if (address < PAX_USER_SHADOW_BASE) {
18952 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18953 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18954 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18955 + } else
18956 + address -= PAX_USER_SHADOW_BASE;
18957 + }
18958 +#endif
18959 +
18960 tsk = current;
18961 mm = tsk->mm;
18962
18963 - /* Get the faulting address: */
18964 - address = read_cr2();
18965 -
18966 /*
18967 * Detect and handle instructions that would cause a page fault for
18968 * both a tracked kernel page and a userspace page.
18969 @@ -1032,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsi
18970 * User-mode registers count as a user access even for any
18971 * potential system fault or CPU buglet:
18972 */
18973 - if (user_mode_vm(regs)) {
18974 + if (user_mode(regs)) {
18975 local_irq_enable();
18976 error_code |= PF_USER;
18977 } else {
18978 @@ -1087,6 +1339,11 @@ retry:
18979 might_sleep();
18980 }
18981
18982 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18983 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18984 + return;
18985 +#endif
18986 +
18987 vma = find_vma(mm, address);
18988 if (unlikely(!vma)) {
18989 bad_area(regs, error_code, address);
18990 @@ -1098,18 +1355,24 @@ retry:
18991 bad_area(regs, error_code, address);
18992 return;
18993 }
18994 - if (error_code & PF_USER) {
18995 - /*
18996 - * Accessing the stack below %sp is always a bug.
18997 - * The large cushion allows instructions like enter
18998 - * and pusha to work. ("enter $65535, $31" pushes
18999 - * 32 pointers and then decrements %sp by 65535.)
19000 - */
19001 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19002 - bad_area(regs, error_code, address);
19003 - return;
19004 - }
19005 + /*
19006 + * Accessing the stack below %sp is always a bug.
19007 + * The large cushion allows instructions like enter
19008 + * and pusha to work. ("enter $65535, $31" pushes
19009 + * 32 pointers and then decrements %sp by 65535.)
19010 + */
19011 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19012 + bad_area(regs, error_code, address);
19013 + return;
19014 + }
19015 +
19016 +#ifdef CONFIG_PAX_SEGMEXEC
19017 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19018 + bad_area(regs, error_code, address);
19019 + return;
19020 }
19021 +#endif
19022 +
19023 if (unlikely(expand_stack(vma, address))) {
19024 bad_area(regs, error_code, address);
19025 return;
19026 @@ -1164,3 +1427,199 @@ good_area:
19027
19028 up_read(&mm->mmap_sem);
19029 }
19030 +
19031 +#ifdef CONFIG_PAX_EMUTRAMP
19032 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19033 +{
19034 + int err;
19035 +
19036 + do { /* PaX: gcc trampoline emulation #1 */
19037 + unsigned char mov1, mov2;
19038 + unsigned short jmp;
19039 + unsigned int addr1, addr2;
19040 +
19041 +#ifdef CONFIG_X86_64
19042 + if ((regs->ip + 11) >> 32)
19043 + break;
19044 +#endif
19045 +
19046 + err = get_user(mov1, (unsigned char __user *)regs->ip);
19047 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19048 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19049 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19050 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19051 +
19052 + if (err)
19053 + break;
19054 +
19055 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19056 + regs->cx = addr1;
19057 + regs->ax = addr2;
19058 + regs->ip = addr2;
19059 + return 2;
19060 + }
19061 + } while (0);
19062 +
19063 + do { /* PaX: gcc trampoline emulation #2 */
19064 + unsigned char mov, jmp;
19065 + unsigned int addr1, addr2;
19066 +
19067 +#ifdef CONFIG_X86_64
19068 + if ((regs->ip + 9) >> 32)
19069 + break;
19070 +#endif
19071 +
19072 + err = get_user(mov, (unsigned char __user *)regs->ip);
19073 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19074 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19075 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19076 +
19077 + if (err)
19078 + break;
19079 +
19080 + if (mov == 0xB9 && jmp == 0xE9) {
19081 + regs->cx = addr1;
19082 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19083 + return 2;
19084 + }
19085 + } while (0);
19086 +
19087 + return 1; /* PaX in action */
19088 +}
19089 +
19090 +#ifdef CONFIG_X86_64
19091 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19092 +{
19093 + int err;
19094 +
19095 + do { /* PaX: gcc trampoline emulation #1 */
19096 + unsigned short mov1, mov2, jmp1;
19097 + unsigned char jmp2;
19098 + unsigned int addr1;
19099 + unsigned long addr2;
19100 +
19101 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19102 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19103 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19104 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19105 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19106 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19107 +
19108 + if (err)
19109 + break;
19110 +
19111 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19112 + regs->r11 = addr1;
19113 + regs->r10 = addr2;
19114 + regs->ip = addr1;
19115 + return 2;
19116 + }
19117 + } while (0);
19118 +
19119 + do { /* PaX: gcc trampoline emulation #2 */
19120 + unsigned short mov1, mov2, jmp1;
19121 + unsigned char jmp2;
19122 + unsigned long addr1, addr2;
19123 +
19124 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19125 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19126 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19127 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19128 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19129 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19130 +
19131 + if (err)
19132 + break;
19133 +
19134 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19135 + regs->r11 = addr1;
19136 + regs->r10 = addr2;
19137 + regs->ip = addr1;
19138 + return 2;
19139 + }
19140 + } while (0);
19141 +
19142 + return 1; /* PaX in action */
19143 +}
19144 +#endif
19145 +
19146 +/*
19147 + * PaX: decide what to do with offenders (regs->ip = fault address)
19148 + *
19149 + * returns 1 when task should be killed
19150 + * 2 when gcc trampoline was detected
19151 + */
19152 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19153 +{
19154 + if (v8086_mode(regs))
19155 + return 1;
19156 +
19157 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19158 + return 1;
19159 +
19160 +#ifdef CONFIG_X86_32
19161 + return pax_handle_fetch_fault_32(regs);
19162 +#else
19163 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19164 + return pax_handle_fetch_fault_32(regs);
19165 + else
19166 + return pax_handle_fetch_fault_64(regs);
19167 +#endif
19168 +}
19169 +#endif
19170 +
19171 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19172 +void pax_report_insns(void *pc, void *sp)
19173 +{
19174 + long i;
19175 +
19176 + printk(KERN_ERR "PAX: bytes at PC: ");
19177 + for (i = 0; i < 20; i++) {
19178 + unsigned char c;
19179 + if (get_user(c, (__force unsigned char __user *)pc+i))
19180 + printk(KERN_CONT "?? ");
19181 + else
19182 + printk(KERN_CONT "%02x ", c);
19183 + }
19184 + printk("\n");
19185 +
19186 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19187 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19188 + unsigned long c;
19189 + if (get_user(c, (__force unsigned long __user *)sp+i))
19190 +#ifdef CONFIG_X86_32
19191 + printk(KERN_CONT "???????? ");
19192 +#else
19193 + printk(KERN_CONT "???????????????? ");
19194 +#endif
19195 + else
19196 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19197 + }
19198 + printk("\n");
19199 +}
19200 +#endif
19201 +
19202 +/**
19203 + * probe_kernel_write(): safely attempt to write to a location
19204 + * @dst: address to write to
19205 + * @src: pointer to the data that shall be written
19206 + * @size: size of the data chunk
19207 + *
19208 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19209 + * happens, handle that and return -EFAULT.
19210 + */
19211 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19212 +{
19213 + long ret;
19214 + mm_segment_t old_fs = get_fs();
19215 +
19216 + set_fs(KERNEL_DS);
19217 + pagefault_disable();
19218 + pax_open_kernel();
19219 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19220 + pax_close_kernel();
19221 + pagefault_enable();
19222 + set_fs(old_fs);
19223 +
19224 + return ret ? -EFAULT : 0;
19225 +}
19226 diff -urNp linux-2.6.39.4/arch/x86/mm/gup.c linux-2.6.39.4/arch/x86/mm/gup.c
19227 --- linux-2.6.39.4/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400
19228 +++ linux-2.6.39.4/arch/x86/mm/gup.c 2011-08-05 19:44:35.000000000 -0400
19229 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19230 addr = start;
19231 len = (unsigned long) nr_pages << PAGE_SHIFT;
19232 end = start + len;
19233 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19234 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19235 (void __user *)start, len)))
19236 return 0;
19237
19238 diff -urNp linux-2.6.39.4/arch/x86/mm/highmem_32.c linux-2.6.39.4/arch/x86/mm/highmem_32.c
19239 --- linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400
19240 +++ linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-08-05 19:44:35.000000000 -0400
19241 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19242 idx = type + KM_TYPE_NR*smp_processor_id();
19243 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19244 BUG_ON(!pte_none(*(kmap_pte-idx)));
19245 +
19246 + pax_open_kernel();
19247 set_pte(kmap_pte-idx, mk_pte(page, prot));
19248 + pax_close_kernel();
19249
19250 return (void *)vaddr;
19251 }
19252 diff -urNp linux-2.6.39.4/arch/x86/mm/hugetlbpage.c linux-2.6.39.4/arch/x86/mm/hugetlbpage.c
19253 --- linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
19254 +++ linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-08-05 19:44:35.000000000 -0400
19255 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19256 struct hstate *h = hstate_file(file);
19257 struct mm_struct *mm = current->mm;
19258 struct vm_area_struct *vma;
19259 - unsigned long start_addr;
19260 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19261 +
19262 +#ifdef CONFIG_PAX_SEGMEXEC
19263 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19264 + pax_task_size = SEGMEXEC_TASK_SIZE;
19265 +#endif
19266 +
19267 + pax_task_size -= PAGE_SIZE;
19268
19269 if (len > mm->cached_hole_size) {
19270 - start_addr = mm->free_area_cache;
19271 + start_addr = mm->free_area_cache;
19272 } else {
19273 - start_addr = TASK_UNMAPPED_BASE;
19274 - mm->cached_hole_size = 0;
19275 + start_addr = mm->mmap_base;
19276 + mm->cached_hole_size = 0;
19277 }
19278
19279 full_search:
19280 @@ -280,26 +287,27 @@ full_search:
19281
19282 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19283 /* At this point: (!vma || addr < vma->vm_end). */
19284 - if (TASK_SIZE - len < addr) {
19285 + if (pax_task_size - len < addr) {
19286 /*
19287 * Start a new search - just in case we missed
19288 * some holes.
19289 */
19290 - if (start_addr != TASK_UNMAPPED_BASE) {
19291 - start_addr = TASK_UNMAPPED_BASE;
19292 + if (start_addr != mm->mmap_base) {
19293 + start_addr = mm->mmap_base;
19294 mm->cached_hole_size = 0;
19295 goto full_search;
19296 }
19297 return -ENOMEM;
19298 }
19299 - if (!vma || addr + len <= vma->vm_start) {
19300 - mm->free_area_cache = addr + len;
19301 - return addr;
19302 - }
19303 + if (check_heap_stack_gap(vma, addr, len))
19304 + break;
19305 if (addr + mm->cached_hole_size < vma->vm_start)
19306 mm->cached_hole_size = vma->vm_start - addr;
19307 addr = ALIGN(vma->vm_end, huge_page_size(h));
19308 }
19309 +
19310 + mm->free_area_cache = addr + len;
19311 + return addr;
19312 }
19313
19314 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19315 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19316 {
19317 struct hstate *h = hstate_file(file);
19318 struct mm_struct *mm = current->mm;
19319 - struct vm_area_struct *vma, *prev_vma;
19320 - unsigned long base = mm->mmap_base, addr = addr0;
19321 + struct vm_area_struct *vma;
19322 + unsigned long base = mm->mmap_base, addr;
19323 unsigned long largest_hole = mm->cached_hole_size;
19324 - int first_time = 1;
19325
19326 /* don't allow allocations above current base */
19327 if (mm->free_area_cache > base)
19328 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19329 largest_hole = 0;
19330 mm->free_area_cache = base;
19331 }
19332 -try_again:
19333 +
19334 /* make sure it can fit in the remaining address space */
19335 if (mm->free_area_cache < len)
19336 goto fail;
19337
19338 /* either no address requested or can't fit in requested address hole */
19339 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19340 + addr = (mm->free_area_cache - len);
19341 do {
19342 + addr &= huge_page_mask(h);
19343 + vma = find_vma(mm, addr);
19344 /*
19345 * Lookup failure means no vma is above this address,
19346 * i.e. return with success:
19347 - */
19348 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19349 - return addr;
19350 -
19351 - /*
19352 * new region fits between prev_vma->vm_end and
19353 * vma->vm_start, use it:
19354 */
19355 - if (addr + len <= vma->vm_start &&
19356 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19357 + if (check_heap_stack_gap(vma, addr, len)) {
19358 /* remember the address as a hint for next time */
19359 - mm->cached_hole_size = largest_hole;
19360 - return (mm->free_area_cache = addr);
19361 - } else {
19362 - /* pull free_area_cache down to the first hole */
19363 - if (mm->free_area_cache == vma->vm_end) {
19364 - mm->free_area_cache = vma->vm_start;
19365 - mm->cached_hole_size = largest_hole;
19366 - }
19367 + mm->cached_hole_size = largest_hole;
19368 + return (mm->free_area_cache = addr);
19369 + }
19370 + /* pull free_area_cache down to the first hole */
19371 + if (mm->free_area_cache == vma->vm_end) {
19372 + mm->free_area_cache = vma->vm_start;
19373 + mm->cached_hole_size = largest_hole;
19374 }
19375
19376 /* remember the largest hole we saw so far */
19377 if (addr + largest_hole < vma->vm_start)
19378 - largest_hole = vma->vm_start - addr;
19379 + largest_hole = vma->vm_start - addr;
19380
19381 /* try just below the current vma->vm_start */
19382 - addr = (vma->vm_start - len) & huge_page_mask(h);
19383 - } while (len <= vma->vm_start);
19384 + addr = skip_heap_stack_gap(vma, len);
19385 + } while (!IS_ERR_VALUE(addr));
19386
19387 fail:
19388 /*
19389 - * if hint left us with no space for the requested
19390 - * mapping then try again:
19391 - */
19392 - if (first_time) {
19393 - mm->free_area_cache = base;
19394 - largest_hole = 0;
19395 - first_time = 0;
19396 - goto try_again;
19397 - }
19398 - /*
19399 * A failed mmap() very likely causes application failure,
19400 * so fall back to the bottom-up function here. This scenario
19401 * can happen with large stack limits and large mmap()
19402 * allocations.
19403 */
19404 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19405 +
19406 +#ifdef CONFIG_PAX_SEGMEXEC
19407 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19408 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19409 + else
19410 +#endif
19411 +
19412 + mm->mmap_base = TASK_UNMAPPED_BASE;
19413 +
19414 +#ifdef CONFIG_PAX_RANDMMAP
19415 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19416 + mm->mmap_base += mm->delta_mmap;
19417 +#endif
19418 +
19419 + mm->free_area_cache = mm->mmap_base;
19420 mm->cached_hole_size = ~0UL;
19421 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19422 len, pgoff, flags);
19423 @@ -386,6 +392,7 @@ fail:
19424 /*
19425 * Restore the topdown base:
19426 */
19427 + mm->mmap_base = base;
19428 mm->free_area_cache = base;
19429 mm->cached_hole_size = ~0UL;
19430
19431 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19432 struct hstate *h = hstate_file(file);
19433 struct mm_struct *mm = current->mm;
19434 struct vm_area_struct *vma;
19435 + unsigned long pax_task_size = TASK_SIZE;
19436
19437 if (len & ~huge_page_mask(h))
19438 return -EINVAL;
19439 - if (len > TASK_SIZE)
19440 +
19441 +#ifdef CONFIG_PAX_SEGMEXEC
19442 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19443 + pax_task_size = SEGMEXEC_TASK_SIZE;
19444 +#endif
19445 +
19446 + pax_task_size -= PAGE_SIZE;
19447 +
19448 + if (len > pax_task_size)
19449 return -ENOMEM;
19450
19451 if (flags & MAP_FIXED) {
19452 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19453 if (addr) {
19454 addr = ALIGN(addr, huge_page_size(h));
19455 vma = find_vma(mm, addr);
19456 - if (TASK_SIZE - len >= addr &&
19457 - (!vma || addr + len <= vma->vm_start))
19458 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19459 return addr;
19460 }
19461 if (mm->get_unmapped_area == arch_get_unmapped_area)
19462 diff -urNp linux-2.6.39.4/arch/x86/mm/init_32.c linux-2.6.39.4/arch/x86/mm/init_32.c
19463 --- linux-2.6.39.4/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
19464 +++ linux-2.6.39.4/arch/x86/mm/init_32.c 2011-08-05 19:44:35.000000000 -0400
19465 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19466 }
19467
19468 /*
19469 - * Creates a middle page table and puts a pointer to it in the
19470 - * given global directory entry. This only returns the gd entry
19471 - * in non-PAE compilation mode, since the middle layer is folded.
19472 - */
19473 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19474 -{
19475 - pud_t *pud;
19476 - pmd_t *pmd_table;
19477 -
19478 -#ifdef CONFIG_X86_PAE
19479 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19480 - if (after_bootmem)
19481 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19482 - else
19483 - pmd_table = (pmd_t *)alloc_low_page();
19484 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19485 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19486 - pud = pud_offset(pgd, 0);
19487 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19488 -
19489 - return pmd_table;
19490 - }
19491 -#endif
19492 - pud = pud_offset(pgd, 0);
19493 - pmd_table = pmd_offset(pud, 0);
19494 -
19495 - return pmd_table;
19496 -}
19497 -
19498 -/*
19499 * Create a page table and place a pointer to it in a middle page
19500 * directory entry:
19501 */
19502 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19503 page_table = (pte_t *)alloc_low_page();
19504
19505 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19506 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19507 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19508 +#else
19509 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19510 +#endif
19511 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19512 }
19513
19514 return pte_offset_kernel(pmd, 0);
19515 }
19516
19517 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19518 +{
19519 + pud_t *pud;
19520 + pmd_t *pmd_table;
19521 +
19522 + pud = pud_offset(pgd, 0);
19523 + pmd_table = pmd_offset(pud, 0);
19524 +
19525 + return pmd_table;
19526 +}
19527 +
19528 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19529 {
19530 int pgd_idx = pgd_index(vaddr);
19531 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19532 int pgd_idx, pmd_idx;
19533 unsigned long vaddr;
19534 pgd_t *pgd;
19535 + pud_t *pud;
19536 pmd_t *pmd;
19537 pte_t *pte = NULL;
19538
19539 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19540 pgd = pgd_base + pgd_idx;
19541
19542 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19543 - pmd = one_md_table_init(pgd);
19544 - pmd = pmd + pmd_index(vaddr);
19545 + pud = pud_offset(pgd, vaddr);
19546 + pmd = pmd_offset(pud, vaddr);
19547 +
19548 +#ifdef CONFIG_X86_PAE
19549 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19550 +#endif
19551 +
19552 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19553 pmd++, pmd_idx++) {
19554 pte = page_table_kmap_check(one_page_table_init(pmd),
19555 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19556 }
19557 }
19558
19559 -static inline int is_kernel_text(unsigned long addr)
19560 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19561 {
19562 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19563 - return 1;
19564 - return 0;
19565 + if ((start > ktla_ktva((unsigned long)_etext) ||
19566 + end <= ktla_ktva((unsigned long)_stext)) &&
19567 + (start > ktla_ktva((unsigned long)_einittext) ||
19568 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19569 +
19570 +#ifdef CONFIG_ACPI_SLEEP
19571 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19572 +#endif
19573 +
19574 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19575 + return 0;
19576 + return 1;
19577 }
19578
19579 /*
19580 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19581 unsigned long last_map_addr = end;
19582 unsigned long start_pfn, end_pfn;
19583 pgd_t *pgd_base = swapper_pg_dir;
19584 - int pgd_idx, pmd_idx, pte_ofs;
19585 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19586 unsigned long pfn;
19587 pgd_t *pgd;
19588 + pud_t *pud;
19589 pmd_t *pmd;
19590 pte_t *pte;
19591 unsigned pages_2m, pages_4k;
19592 @@ -281,8 +282,13 @@ repeat:
19593 pfn = start_pfn;
19594 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19595 pgd = pgd_base + pgd_idx;
19596 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19597 - pmd = one_md_table_init(pgd);
19598 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19599 + pud = pud_offset(pgd, 0);
19600 + pmd = pmd_offset(pud, 0);
19601 +
19602 +#ifdef CONFIG_X86_PAE
19603 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19604 +#endif
19605
19606 if (pfn >= end_pfn)
19607 continue;
19608 @@ -294,14 +300,13 @@ repeat:
19609 #endif
19610 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19611 pmd++, pmd_idx++) {
19612 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19613 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19614
19615 /*
19616 * Map with big pages if possible, otherwise
19617 * create normal page tables:
19618 */
19619 if (use_pse) {
19620 - unsigned int addr2;
19621 pgprot_t prot = PAGE_KERNEL_LARGE;
19622 /*
19623 * first pass will use the same initial
19624 @@ -311,11 +316,7 @@ repeat:
19625 __pgprot(PTE_IDENT_ATTR |
19626 _PAGE_PSE);
19627
19628 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19629 - PAGE_OFFSET + PAGE_SIZE-1;
19630 -
19631 - if (is_kernel_text(addr) ||
19632 - is_kernel_text(addr2))
19633 + if (is_kernel_text(address, address + PMD_SIZE))
19634 prot = PAGE_KERNEL_LARGE_EXEC;
19635
19636 pages_2m++;
19637 @@ -332,7 +333,7 @@ repeat:
19638 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19639 pte += pte_ofs;
19640 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19641 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19642 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19643 pgprot_t prot = PAGE_KERNEL;
19644 /*
19645 * first pass will use the same initial
19646 @@ -340,7 +341,7 @@ repeat:
19647 */
19648 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19649
19650 - if (is_kernel_text(addr))
19651 + if (is_kernel_text(address, address + PAGE_SIZE))
19652 prot = PAGE_KERNEL_EXEC;
19653
19654 pages_4k++;
19655 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19656
19657 pud = pud_offset(pgd, va);
19658 pmd = pmd_offset(pud, va);
19659 - if (!pmd_present(*pmd))
19660 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19661 break;
19662
19663 pte = pte_offset_kernel(pmd, va);
19664 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19665
19666 static void __init pagetable_init(void)
19667 {
19668 - pgd_t *pgd_base = swapper_pg_dir;
19669 -
19670 - permanent_kmaps_init(pgd_base);
19671 + permanent_kmaps_init(swapper_pg_dir);
19672 }
19673
19674 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19675 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19676 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19677
19678 /* user-defined highmem size */
19679 @@ -754,6 +753,12 @@ void __init mem_init(void)
19680
19681 pci_iommu_alloc();
19682
19683 +#ifdef CONFIG_PAX_PER_CPU_PGD
19684 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19685 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19686 + KERNEL_PGD_PTRS);
19687 +#endif
19688 +
19689 #ifdef CONFIG_FLATMEM
19690 BUG_ON(!mem_map);
19691 #endif
19692 @@ -771,7 +776,7 @@ void __init mem_init(void)
19693 set_highmem_pages_init();
19694
19695 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19696 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19697 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19698 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19699
19700 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19701 @@ -812,10 +817,10 @@ void __init mem_init(void)
19702 ((unsigned long)&__init_end -
19703 (unsigned long)&__init_begin) >> 10,
19704
19705 - (unsigned long)&_etext, (unsigned long)&_edata,
19706 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19707 + (unsigned long)&_sdata, (unsigned long)&_edata,
19708 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19709
19710 - (unsigned long)&_text, (unsigned long)&_etext,
19711 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19712 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19713
19714 /*
19715 @@ -893,6 +898,7 @@ void set_kernel_text_rw(void)
19716 if (!kernel_set_to_readonly)
19717 return;
19718
19719 + start = ktla_ktva(start);
19720 pr_debug("Set kernel text: %lx - %lx for read write\n",
19721 start, start+size);
19722
19723 @@ -907,6 +913,7 @@ void set_kernel_text_ro(void)
19724 if (!kernel_set_to_readonly)
19725 return;
19726
19727 + start = ktla_ktva(start);
19728 pr_debug("Set kernel text: %lx - %lx for read only\n",
19729 start, start+size);
19730
19731 @@ -935,6 +942,7 @@ void mark_rodata_ro(void)
19732 unsigned long start = PFN_ALIGN(_text);
19733 unsigned long size = PFN_ALIGN(_etext) - start;
19734
19735 + start = ktla_ktva(start);
19736 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19737 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19738 size >> 10);
19739 diff -urNp linux-2.6.39.4/arch/x86/mm/init_64.c linux-2.6.39.4/arch/x86/mm/init_64.c
19740 --- linux-2.6.39.4/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400
19741 +++ linux-2.6.39.4/arch/x86/mm/init_64.c 2011-08-05 19:44:35.000000000 -0400
19742 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
19743 * around without checking the pgd every time.
19744 */
19745
19746 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19747 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19748 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19749
19750 int force_personality32;
19751 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
19752
19753 for (address = start; address <= end; address += PGDIR_SIZE) {
19754 const pgd_t *pgd_ref = pgd_offset_k(address);
19755 +
19756 +#ifdef CONFIG_PAX_PER_CPU_PGD
19757 + unsigned long cpu;
19758 +#else
19759 struct page *page;
19760 +#endif
19761
19762 if (pgd_none(*pgd_ref))
19763 continue;
19764
19765 spin_lock(&pgd_lock);
19766 +
19767 +#ifdef CONFIG_PAX_PER_CPU_PGD
19768 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19769 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19770 +#else
19771 list_for_each_entry(page, &pgd_list, lru) {
19772 pgd_t *pgd;
19773 spinlock_t *pgt_lock;
19774 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
19775 /* the pgt_lock only for Xen */
19776 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19777 spin_lock(pgt_lock);
19778 +#endif
19779
19780 if (pgd_none(*pgd))
19781 set_pgd(pgd, *pgd_ref);
19782 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
19783 BUG_ON(pgd_page_vaddr(*pgd)
19784 != pgd_page_vaddr(*pgd_ref));
19785
19786 +#ifndef CONFIG_PAX_PER_CPU_PGD
19787 spin_unlock(pgt_lock);
19788 +#endif
19789 +
19790 }
19791 spin_unlock(&pgd_lock);
19792 }
19793 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19794 pmd = fill_pmd(pud, vaddr);
19795 pte = fill_pte(pmd, vaddr);
19796
19797 + pax_open_kernel();
19798 set_pte(pte, new_pte);
19799 + pax_close_kernel();
19800
19801 /*
19802 * It's enough to flush this one mapping.
19803 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
19804 pgd = pgd_offset_k((unsigned long)__va(phys));
19805 if (pgd_none(*pgd)) {
19806 pud = (pud_t *) spp_getpage();
19807 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19808 - _PAGE_USER));
19809 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19810 }
19811 pud = pud_offset(pgd, (unsigned long)__va(phys));
19812 if (pud_none(*pud)) {
19813 pmd = (pmd_t *) spp_getpage();
19814 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19815 - _PAGE_USER));
19816 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19817 }
19818 pmd = pmd_offset(pud, phys);
19819 BUG_ON(!pmd_none(*pmd));
19820 @@ -698,6 +712,12 @@ void __init mem_init(void)
19821
19822 pci_iommu_alloc();
19823
19824 +#ifdef CONFIG_PAX_PER_CPU_PGD
19825 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19826 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19827 + KERNEL_PGD_PTRS);
19828 +#endif
19829 +
19830 /* clear_bss() already clear the empty_zero_page */
19831
19832 reservedpages = 0;
19833 @@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr)
19834 static struct vm_area_struct gate_vma = {
19835 .vm_start = VSYSCALL_START,
19836 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19837 - .vm_page_prot = PAGE_READONLY_EXEC,
19838 - .vm_flags = VM_READ | VM_EXEC
19839 + .vm_page_prot = PAGE_READONLY,
19840 + .vm_flags = VM_READ
19841 };
19842
19843 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19844 @@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add
19845
19846 const char *arch_vma_name(struct vm_area_struct *vma)
19847 {
19848 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19849 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19850 return "[vdso]";
19851 if (vma == &gate_vma)
19852 return "[vsyscall]";
19853 diff -urNp linux-2.6.39.4/arch/x86/mm/init.c linux-2.6.39.4/arch/x86/mm/init.c
19854 --- linux-2.6.39.4/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400
19855 +++ linux-2.6.39.4/arch/x86/mm/init.c 2011-08-05 19:44:35.000000000 -0400
19856 @@ -33,7 +33,7 @@ int direct_gbpages
19857 static void __init find_early_table_space(unsigned long end, int use_pse,
19858 int use_gbpages)
19859 {
19860 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19861 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19862 phys_addr_t base;
19863
19864 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19865 @@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m
19866 */
19867 int devmem_is_allowed(unsigned long pagenr)
19868 {
19869 - if (pagenr <= 256)
19870 +#ifdef CONFIG_GRKERNSEC_KMEM
19871 + /* allow BDA */
19872 + if (!pagenr)
19873 + return 1;
19874 + /* allow EBDA */
19875 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19876 + return 1;
19877 +#else
19878 + if (!pagenr)
19879 + return 1;
19880 +#ifdef CONFIG_VM86
19881 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19882 + return 1;
19883 +#endif
19884 +#endif
19885 +
19886 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19887 return 1;
19888 +#ifdef CONFIG_GRKERNSEC_KMEM
19889 + /* throw out everything else below 1MB */
19890 + if (pagenr <= 256)
19891 + return 0;
19892 +#endif
19893 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19894 return 0;
19895 if (!page_is_ram(pagenr))
19896 return 1;
19897 +
19898 return 0;
19899 }
19900
19901 @@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne
19902
19903 void free_initmem(void)
19904 {
19905 +
19906 +#ifdef CONFIG_PAX_KERNEXEC
19907 +#ifdef CONFIG_X86_32
19908 + /* PaX: limit KERNEL_CS to actual size */
19909 + unsigned long addr, limit;
19910 + struct desc_struct d;
19911 + int cpu;
19912 +
19913 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19914 + limit = (limit - 1UL) >> PAGE_SHIFT;
19915 +
19916 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19917 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19918 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19919 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19920 + }
19921 +
19922 + /* PaX: make KERNEL_CS read-only */
19923 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19924 + if (!paravirt_enabled())
19925 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19926 +/*
19927 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19928 + pgd = pgd_offset_k(addr);
19929 + pud = pud_offset(pgd, addr);
19930 + pmd = pmd_offset(pud, addr);
19931 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19932 + }
19933 +*/
19934 +#ifdef CONFIG_X86_PAE
19935 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19936 +/*
19937 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19938 + pgd = pgd_offset_k(addr);
19939 + pud = pud_offset(pgd, addr);
19940 + pmd = pmd_offset(pud, addr);
19941 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19942 + }
19943 +*/
19944 +#endif
19945 +
19946 +#ifdef CONFIG_MODULES
19947 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19948 +#endif
19949 +
19950 +#else
19951 + pgd_t *pgd;
19952 + pud_t *pud;
19953 + pmd_t *pmd;
19954 + unsigned long addr, end;
19955 +
19956 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19957 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19958 + pgd = pgd_offset_k(addr);
19959 + pud = pud_offset(pgd, addr);
19960 + pmd = pmd_offset(pud, addr);
19961 + if (!pmd_present(*pmd))
19962 + continue;
19963 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19964 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19965 + else
19966 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19967 + }
19968 +
19969 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19970 + end = addr + KERNEL_IMAGE_SIZE;
19971 + for (; addr < end; addr += PMD_SIZE) {
19972 + pgd = pgd_offset_k(addr);
19973 + pud = pud_offset(pgd, addr);
19974 + pmd = pmd_offset(pud, addr);
19975 + if (!pmd_present(*pmd))
19976 + continue;
19977 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19978 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19979 + }
19980 +#endif
19981 +
19982 + flush_tlb_all();
19983 +#endif
19984 +
19985 free_init_pages("unused kernel memory",
19986 (unsigned long)(&__init_begin),
19987 (unsigned long)(&__init_end));
19988 diff -urNp linux-2.6.39.4/arch/x86/mm/iomap_32.c linux-2.6.39.4/arch/x86/mm/iomap_32.c
19989 --- linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400
19990 +++ linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-08-05 19:44:35.000000000 -0400
19991 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19992 type = kmap_atomic_idx_push();
19993 idx = type + KM_TYPE_NR * smp_processor_id();
19994 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19995 +
19996 + pax_open_kernel();
19997 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19998 + pax_close_kernel();
19999 +
20000 arch_flush_lazy_mmu_mode();
20001
20002 return (void *)vaddr;
20003 diff -urNp linux-2.6.39.4/arch/x86/mm/ioremap.c linux-2.6.39.4/arch/x86/mm/ioremap.c
20004 --- linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400
20005 +++ linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-08-05 19:44:35.000000000 -0400
20006 @@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
20007 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20008 int is_ram = page_is_ram(pfn);
20009
20010 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20011 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20012 return NULL;
20013 WARN_ON_ONCE(is_ram);
20014 }
20015 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20016 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20017
20018 static __initdata int after_paging_init;
20019 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20020 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20021
20022 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20023 {
20024 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20025 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20026
20027 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20028 - memset(bm_pte, 0, sizeof(bm_pte));
20029 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
20030 + pmd_populate_user(&init_mm, pmd, bm_pte);
20031
20032 /*
20033 * The boot-ioremap range spans multiple pmds, for which
20034 diff -urNp linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c
20035 --- linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400
20036 +++ linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-05 19:44:35.000000000 -0400
20037 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20038 * memory (e.g. tracked pages)? For now, we need this to avoid
20039 * invoking kmemcheck for PnP BIOS calls.
20040 */
20041 - if (regs->flags & X86_VM_MASK)
20042 + if (v8086_mode(regs))
20043 return false;
20044 - if (regs->cs != __KERNEL_CS)
20045 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20046 return false;
20047
20048 pte = kmemcheck_pte_lookup(address);
20049 diff -urNp linux-2.6.39.4/arch/x86/mm/mmap.c linux-2.6.39.4/arch/x86/mm/mmap.c
20050 --- linux-2.6.39.4/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
20051 +++ linux-2.6.39.4/arch/x86/mm/mmap.c 2011-08-05 19:44:35.000000000 -0400
20052 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20053 * Leave an at least ~128 MB hole with possible stack randomization.
20054 */
20055 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20056 -#define MAX_GAP (TASK_SIZE/6*5)
20057 +#define MAX_GAP (pax_task_size/6*5)
20058
20059 /*
20060 * True on X86_32 or when emulating IA32 on X86_64
20061 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20062 return rnd << PAGE_SHIFT;
20063 }
20064
20065 -static unsigned long mmap_base(void)
20066 +static unsigned long mmap_base(struct mm_struct *mm)
20067 {
20068 unsigned long gap = rlimit(RLIMIT_STACK);
20069 + unsigned long pax_task_size = TASK_SIZE;
20070 +
20071 +#ifdef CONFIG_PAX_SEGMEXEC
20072 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20073 + pax_task_size = SEGMEXEC_TASK_SIZE;
20074 +#endif
20075
20076 if (gap < MIN_GAP)
20077 gap = MIN_GAP;
20078 else if (gap > MAX_GAP)
20079 gap = MAX_GAP;
20080
20081 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20082 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20083 }
20084
20085 /*
20086 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20087 * does, but not when emulating X86_32
20088 */
20089 -static unsigned long mmap_legacy_base(void)
20090 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
20091 {
20092 - if (mmap_is_ia32())
20093 + if (mmap_is_ia32()) {
20094 +
20095 +#ifdef CONFIG_PAX_SEGMEXEC
20096 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20097 + return SEGMEXEC_TASK_UNMAPPED_BASE;
20098 + else
20099 +#endif
20100 +
20101 return TASK_UNMAPPED_BASE;
20102 - else
20103 + } else
20104 return TASK_UNMAPPED_BASE + mmap_rnd();
20105 }
20106
20107 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20108 void arch_pick_mmap_layout(struct mm_struct *mm)
20109 {
20110 if (mmap_is_legacy()) {
20111 - mm->mmap_base = mmap_legacy_base();
20112 + mm->mmap_base = mmap_legacy_base(mm);
20113 +
20114 +#ifdef CONFIG_PAX_RANDMMAP
20115 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20116 + mm->mmap_base += mm->delta_mmap;
20117 +#endif
20118 +
20119 mm->get_unmapped_area = arch_get_unmapped_area;
20120 mm->unmap_area = arch_unmap_area;
20121 } else {
20122 - mm->mmap_base = mmap_base();
20123 + mm->mmap_base = mmap_base(mm);
20124 +
20125 +#ifdef CONFIG_PAX_RANDMMAP
20126 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20127 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20128 +#endif
20129 +
20130 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20131 mm->unmap_area = arch_unmap_area_topdown;
20132 }
20133 diff -urNp linux-2.6.39.4/arch/x86/mm/mmio-mod.c linux-2.6.39.4/arch/x86/mm/mmio-mod.c
20134 --- linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400
20135 +++ linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-08-05 19:44:35.000000000 -0400
20136 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20137 break;
20138 default:
20139 {
20140 - unsigned char *ip = (unsigned char *)instptr;
20141 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20142 my_trace->opcode = MMIO_UNKNOWN_OP;
20143 my_trace->width = 0;
20144 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20145 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20146 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20147 void __iomem *addr)
20148 {
20149 - static atomic_t next_id;
20150 + static atomic_unchecked_t next_id;
20151 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20152 /* These are page-unaligned. */
20153 struct mmiotrace_map map = {
20154 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20155 .private = trace
20156 },
20157 .phys = offset,
20158 - .id = atomic_inc_return(&next_id)
20159 + .id = atomic_inc_return_unchecked(&next_id)
20160 };
20161 map.map_id = trace->id;
20162
20163 diff -urNp linux-2.6.39.4/arch/x86/mm/numa_32.c linux-2.6.39.4/arch/x86/mm/numa_32.c
20164 --- linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400
20165 +++ linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-08-05 19:44:35.000000000 -0400
20166 @@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
20167 }
20168 #endif
20169
20170 -extern unsigned long find_max_low_pfn(void);
20171 extern unsigned long highend_pfn, highstart_pfn;
20172
20173 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
20174 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr.c linux-2.6.39.4/arch/x86/mm/pageattr.c
20175 --- linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400
20176 +++ linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-08-05 19:44:35.000000000 -0400
20177 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20178 */
20179 #ifdef CONFIG_PCI_BIOS
20180 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20181 - pgprot_val(forbidden) |= _PAGE_NX;
20182 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20183 #endif
20184
20185 /*
20186 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20187 * Does not cover __inittext since that is gone later on. On
20188 * 64bit we do not enforce !NX on the low mapping
20189 */
20190 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20191 - pgprot_val(forbidden) |= _PAGE_NX;
20192 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20193 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20194
20195 +#ifdef CONFIG_DEBUG_RODATA
20196 /*
20197 * The .rodata section needs to be read-only. Using the pfn
20198 * catches all aliases.
20199 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20200 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20201 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20202 pgprot_val(forbidden) |= _PAGE_RW;
20203 +#endif
20204
20205 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20206 /*
20207 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20208 }
20209 #endif
20210
20211 +#ifdef CONFIG_PAX_KERNEXEC
20212 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20213 + pgprot_val(forbidden) |= _PAGE_RW;
20214 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20215 + }
20216 +#endif
20217 +
20218 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20219
20220 return prot;
20221 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20222 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20223 {
20224 /* change init_mm */
20225 + pax_open_kernel();
20226 set_pte_atomic(kpte, pte);
20227 +
20228 #ifdef CONFIG_X86_32
20229 if (!SHARED_KERNEL_PMD) {
20230 +
20231 +#ifdef CONFIG_PAX_PER_CPU_PGD
20232 + unsigned long cpu;
20233 +#else
20234 struct page *page;
20235 +#endif
20236
20237 +#ifdef CONFIG_PAX_PER_CPU_PGD
20238 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20239 + pgd_t *pgd = get_cpu_pgd(cpu);
20240 +#else
20241 list_for_each_entry(page, &pgd_list, lru) {
20242 - pgd_t *pgd;
20243 + pgd_t *pgd = (pgd_t *)page_address(page);
20244 +#endif
20245 +
20246 pud_t *pud;
20247 pmd_t *pmd;
20248
20249 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20250 + pgd += pgd_index(address);
20251 pud = pud_offset(pgd, address);
20252 pmd = pmd_offset(pud, address);
20253 set_pte_atomic((pte_t *)pmd, pte);
20254 }
20255 }
20256 #endif
20257 + pax_close_kernel();
20258 }
20259
20260 static int
20261 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr-test.c linux-2.6.39.4/arch/x86/mm/pageattr-test.c
20262 --- linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400
20263 +++ linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-08-05 19:44:35.000000000 -0400
20264 @@ -36,7 +36,7 @@ enum {
20265
20266 static int pte_testbit(pte_t pte)
20267 {
20268 - return pte_flags(pte) & _PAGE_UNUSED1;
20269 + return pte_flags(pte) & _PAGE_CPA_TEST;
20270 }
20271
20272 struct split_state {
20273 diff -urNp linux-2.6.39.4/arch/x86/mm/pat.c linux-2.6.39.4/arch/x86/mm/pat.c
20274 --- linux-2.6.39.4/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400
20275 +++ linux-2.6.39.4/arch/x86/mm/pat.c 2011-08-05 19:44:35.000000000 -0400
20276 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20277
20278 if (!entry) {
20279 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20280 - current->comm, current->pid, start, end);
20281 + current->comm, task_pid_nr(current), start, end);
20282 return -EINVAL;
20283 }
20284
20285 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20286 while (cursor < to) {
20287 if (!devmem_is_allowed(pfn)) {
20288 printk(KERN_INFO
20289 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20290 - current->comm, from, to);
20291 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20292 + current->comm, from, to, cursor);
20293 return 0;
20294 }
20295 cursor += PAGE_SIZE;
20296 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20297 printk(KERN_INFO
20298 "%s:%d ioremap_change_attr failed %s "
20299 "for %Lx-%Lx\n",
20300 - current->comm, current->pid,
20301 + current->comm, task_pid_nr(current),
20302 cattr_name(flags),
20303 base, (unsigned long long)(base + size));
20304 return -EINVAL;
20305 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20306 if (want_flags != flags) {
20307 printk(KERN_WARNING
20308 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20309 - current->comm, current->pid,
20310 + current->comm, task_pid_nr(current),
20311 cattr_name(want_flags),
20312 (unsigned long long)paddr,
20313 (unsigned long long)(paddr + size),
20314 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20315 free_memtype(paddr, paddr + size);
20316 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20317 " for %Lx-%Lx, got %s\n",
20318 - current->comm, current->pid,
20319 + current->comm, task_pid_nr(current),
20320 cattr_name(want_flags),
20321 (unsigned long long)paddr,
20322 (unsigned long long)(paddr + size),
20323 diff -urNp linux-2.6.39.4/arch/x86/mm/pf_in.c linux-2.6.39.4/arch/x86/mm/pf_in.c
20324 --- linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-05-19 00:06:34.000000000 -0400
20325 +++ linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-08-05 19:44:35.000000000 -0400
20326 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20327 int i;
20328 enum reason_type rv = OTHERS;
20329
20330 - p = (unsigned char *)ins_addr;
20331 + p = (unsigned char *)ktla_ktva(ins_addr);
20332 p += skip_prefix(p, &prf);
20333 p += get_opcode(p, &opcode);
20334
20335 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20336 struct prefix_bits prf;
20337 int i;
20338
20339 - p = (unsigned char *)ins_addr;
20340 + p = (unsigned char *)ktla_ktva(ins_addr);
20341 p += skip_prefix(p, &prf);
20342 p += get_opcode(p, &opcode);
20343
20344 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20345 struct prefix_bits prf;
20346 int i;
20347
20348 - p = (unsigned char *)ins_addr;
20349 + p = (unsigned char *)ktla_ktva(ins_addr);
20350 p += skip_prefix(p, &prf);
20351 p += get_opcode(p, &opcode);
20352
20353 @@ -416,7 +416,7 @@ unsigned long get_ins_reg_val(unsigned l
20354 int i;
20355 unsigned long rv;
20356
20357 - p = (unsigned char *)ins_addr;
20358 + p = (unsigned char *)ktla_ktva(ins_addr);
20359 p += skip_prefix(p, &prf);
20360 p += get_opcode(p, &opcode);
20361 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20362 @@ -476,7 +476,7 @@ unsigned long get_ins_imm_val(unsigned l
20363 int i;
20364 unsigned long rv;
20365
20366 - p = (unsigned char *)ins_addr;
20367 + p = (unsigned char *)ktla_ktva(ins_addr);
20368 p += skip_prefix(p, &prf);
20369 p += get_opcode(p, &opcode);
20370 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20371 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable_32.c linux-2.6.39.4/arch/x86/mm/pgtable_32.c
20372 --- linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400
20373 +++ linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-08-05 19:44:35.000000000 -0400
20374 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20375 return;
20376 }
20377 pte = pte_offset_kernel(pmd, vaddr);
20378 +
20379 + pax_open_kernel();
20380 if (pte_val(pteval))
20381 set_pte_at(&init_mm, vaddr, pte, pteval);
20382 else
20383 pte_clear(&init_mm, vaddr, pte);
20384 + pax_close_kernel();
20385
20386 /*
20387 * It's enough to flush this one mapping.
20388 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable.c linux-2.6.39.4/arch/x86/mm/pgtable.c
20389 --- linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400
20390 +++ linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-08-05 19:44:35.000000000 -0400
20391 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20392 list_del(&page->lru);
20393 }
20394
20395 -#define UNSHARED_PTRS_PER_PGD \
20396 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20397 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20398 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20399
20400 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20401 +{
20402 + while (count--)
20403 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20404 +}
20405 +#endif
20406 +
20407 +#ifdef CONFIG_PAX_PER_CPU_PGD
20408 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20409 +{
20410 + while (count--)
20411 +
20412 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20413 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20414 +#else
20415 + *dst++ = *src++;
20416 +#endif
20417
20418 +}
20419 +#endif
20420 +
20421 +#ifdef CONFIG_X86_64
20422 +#define pxd_t pud_t
20423 +#define pyd_t pgd_t
20424 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20425 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20426 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20427 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20428 +#define PYD_SIZE PGDIR_SIZE
20429 +#else
20430 +#define pxd_t pmd_t
20431 +#define pyd_t pud_t
20432 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20433 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20434 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20435 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20436 +#define PYD_SIZE PUD_SIZE
20437 +#endif
20438 +
20439 +#ifdef CONFIG_PAX_PER_CPU_PGD
20440 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20441 +static inline void pgd_dtor(pgd_t *pgd) {}
20442 +#else
20443 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20444 {
20445 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20446 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20447 pgd_list_del(pgd);
20448 spin_unlock(&pgd_lock);
20449 }
20450 +#endif
20451
20452 /*
20453 * List of all pgd's needed for non-PAE so it can invalidate entries
20454 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20455 * -- wli
20456 */
20457
20458 -#ifdef CONFIG_X86_PAE
20459 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20460 /*
20461 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20462 * updating the top-level pagetable entries to guarantee the
20463 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20464 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20465 * and initialize the kernel pmds here.
20466 */
20467 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20468 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20469
20470 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20471 {
20472 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20473 */
20474 flush_tlb_mm(mm);
20475 }
20476 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20477 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20478 #else /* !CONFIG_X86_PAE */
20479
20480 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20481 -#define PREALLOCATED_PMDS 0
20482 +#define PREALLOCATED_PXDS 0
20483
20484 #endif /* CONFIG_X86_PAE */
20485
20486 -static void free_pmds(pmd_t *pmds[])
20487 +static void free_pxds(pxd_t *pxds[])
20488 {
20489 int i;
20490
20491 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20492 - if (pmds[i])
20493 - free_page((unsigned long)pmds[i]);
20494 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20495 + if (pxds[i])
20496 + free_page((unsigned long)pxds[i]);
20497 }
20498
20499 -static int preallocate_pmds(pmd_t *pmds[])
20500 +static int preallocate_pxds(pxd_t *pxds[])
20501 {
20502 int i;
20503 bool failed = false;
20504
20505 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20506 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20507 - if (pmd == NULL)
20508 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20509 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20510 + if (pxd == NULL)
20511 failed = true;
20512 - pmds[i] = pmd;
20513 + pxds[i] = pxd;
20514 }
20515
20516 if (failed) {
20517 - free_pmds(pmds);
20518 + free_pxds(pxds);
20519 return -ENOMEM;
20520 }
20521
20522 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20523 * preallocate which never got a corresponding vma will need to be
20524 * freed manually.
20525 */
20526 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20527 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20528 {
20529 int i;
20530
20531 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20532 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20533 pgd_t pgd = pgdp[i];
20534
20535 if (pgd_val(pgd) != 0) {
20536 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20537 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20538
20539 - pgdp[i] = native_make_pgd(0);
20540 + set_pgd(pgdp + i, native_make_pgd(0));
20541
20542 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20543 - pmd_free(mm, pmd);
20544 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20545 + pxd_free(mm, pxd);
20546 }
20547 }
20548 }
20549
20550 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20551 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20552 {
20553 - pud_t *pud;
20554 + pyd_t *pyd;
20555 unsigned long addr;
20556 int i;
20557
20558 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20559 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20560 return;
20561
20562 - pud = pud_offset(pgd, 0);
20563 +#ifdef CONFIG_X86_64
20564 + pyd = pyd_offset(mm, 0L);
20565 +#else
20566 + pyd = pyd_offset(pgd, 0L);
20567 +#endif
20568
20569 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20570 - i++, pud++, addr += PUD_SIZE) {
20571 - pmd_t *pmd = pmds[i];
20572 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20573 + i++, pyd++, addr += PYD_SIZE) {
20574 + pxd_t *pxd = pxds[i];
20575
20576 if (i >= KERNEL_PGD_BOUNDARY)
20577 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20578 - sizeof(pmd_t) * PTRS_PER_PMD);
20579 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20580 + sizeof(pxd_t) * PTRS_PER_PMD);
20581
20582 - pud_populate(mm, pud, pmd);
20583 + pyd_populate(mm, pyd, pxd);
20584 }
20585 }
20586
20587 pgd_t *pgd_alloc(struct mm_struct *mm)
20588 {
20589 pgd_t *pgd;
20590 - pmd_t *pmds[PREALLOCATED_PMDS];
20591 + pxd_t *pxds[PREALLOCATED_PXDS];
20592
20593 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20594
20595 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20596
20597 mm->pgd = pgd;
20598
20599 - if (preallocate_pmds(pmds) != 0)
20600 + if (preallocate_pxds(pxds) != 0)
20601 goto out_free_pgd;
20602
20603 if (paravirt_pgd_alloc(mm) != 0)
20604 - goto out_free_pmds;
20605 + goto out_free_pxds;
20606
20607 /*
20608 * Make sure that pre-populating the pmds is atomic with
20609 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20610 spin_lock(&pgd_lock);
20611
20612 pgd_ctor(mm, pgd);
20613 - pgd_prepopulate_pmd(mm, pgd, pmds);
20614 + pgd_prepopulate_pxd(mm, pgd, pxds);
20615
20616 spin_unlock(&pgd_lock);
20617
20618 return pgd;
20619
20620 -out_free_pmds:
20621 - free_pmds(pmds);
20622 +out_free_pxds:
20623 + free_pxds(pxds);
20624 out_free_pgd:
20625 free_page((unsigned long)pgd);
20626 out:
20627 @@ -295,7 +344,7 @@ out:
20628
20629 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20630 {
20631 - pgd_mop_up_pmds(mm, pgd);
20632 + pgd_mop_up_pxds(mm, pgd);
20633 pgd_dtor(pgd);
20634 paravirt_pgd_free(mm, pgd);
20635 free_page((unsigned long)pgd);
20636 diff -urNp linux-2.6.39.4/arch/x86/mm/setup_nx.c linux-2.6.39.4/arch/x86/mm/setup_nx.c
20637 --- linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400
20638 +++ linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-08-05 19:44:35.000000000 -0400
20639 @@ -5,8 +5,10 @@
20640 #include <asm/pgtable.h>
20641 #include <asm/proto.h>
20642
20643 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20644 static int disable_nx __cpuinitdata;
20645
20646 +#ifndef CONFIG_PAX_PAGEEXEC
20647 /*
20648 * noexec = on|off
20649 *
20650 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20651 return 0;
20652 }
20653 early_param("noexec", noexec_setup);
20654 +#endif
20655 +
20656 +#endif
20657
20658 void __cpuinit x86_configure_nx(void)
20659 {
20660 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20661 if (cpu_has_nx && !disable_nx)
20662 __supported_pte_mask |= _PAGE_NX;
20663 else
20664 +#endif
20665 __supported_pte_mask &= ~_PAGE_NX;
20666 }
20667
20668 diff -urNp linux-2.6.39.4/arch/x86/mm/tlb.c linux-2.6.39.4/arch/x86/mm/tlb.c
20669 --- linux-2.6.39.4/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400
20670 +++ linux-2.6.39.4/arch/x86/mm/tlb.c 2011-08-05 19:44:35.000000000 -0400
20671 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20672 BUG();
20673 cpumask_clear_cpu(cpu,
20674 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20675 +
20676 +#ifndef CONFIG_PAX_PER_CPU_PGD
20677 load_cr3(swapper_pg_dir);
20678 +#endif
20679 +
20680 }
20681 EXPORT_SYMBOL_GPL(leave_mm);
20682
20683 diff -urNp linux-2.6.39.4/arch/x86/oprofile/backtrace.c linux-2.6.39.4/arch/x86/oprofile/backtrace.c
20684 --- linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400
20685 +++ linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-08-05 19:44:35.000000000 -0400
20686 @@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram
20687 struct stack_frame_ia32 *fp;
20688
20689 /* Also check accessibility of one struct frame_head beyond */
20690 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
20691 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
20692 return NULL;
20693 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
20694 return NULL;
20695 @@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg
20696 {
20697 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20698
20699 - if (!user_mode_vm(regs)) {
20700 + if (!user_mode(regs)) {
20701 unsigned long stack = kernel_stack_pointer(regs);
20702 if (depth)
20703 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20704 diff -urNp linux-2.6.39.4/arch/x86/pci/mrst.c linux-2.6.39.4/arch/x86/pci/mrst.c
20705 --- linux-2.6.39.4/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400
20706 +++ linux-2.6.39.4/arch/x86/pci/mrst.c 2011-08-05 20:34:06.000000000 -0400
20707 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20708 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20709 pci_mmcfg_late_init();
20710 pcibios_enable_irq = mrst_pci_irq_enable;
20711 - pci_root_ops = pci_mrst_ops;
20712 + pax_open_kernel();
20713 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20714 + pax_close_kernel();
20715 /* Continue with standard init */
20716 return 1;
20717 }
20718 diff -urNp linux-2.6.39.4/arch/x86/pci/pcbios.c linux-2.6.39.4/arch/x86/pci/pcbios.c
20719 --- linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400
20720 +++ linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-08-05 20:34:06.000000000 -0400
20721 @@ -79,50 +79,93 @@ union bios32 {
20722 static struct {
20723 unsigned long address;
20724 unsigned short segment;
20725 -} bios32_indirect = { 0, __KERNEL_CS };
20726 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20727
20728 /*
20729 * Returns the entry point for the given service, NULL on error
20730 */
20731
20732 -static unsigned long bios32_service(unsigned long service)
20733 +static unsigned long __devinit bios32_service(unsigned long service)
20734 {
20735 unsigned char return_code; /* %al */
20736 unsigned long address; /* %ebx */
20737 unsigned long length; /* %ecx */
20738 unsigned long entry; /* %edx */
20739 unsigned long flags;
20740 + struct desc_struct d, *gdt;
20741
20742 local_irq_save(flags);
20743 - __asm__("lcall *(%%edi); cld"
20744 +
20745 + gdt = get_cpu_gdt_table(smp_processor_id());
20746 +
20747 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20748 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20749 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20750 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20751 +
20752 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20753 : "=a" (return_code),
20754 "=b" (address),
20755 "=c" (length),
20756 "=d" (entry)
20757 : "0" (service),
20758 "1" (0),
20759 - "D" (&bios32_indirect));
20760 + "D" (&bios32_indirect),
20761 + "r"(__PCIBIOS_DS)
20762 + : "memory");
20763 +
20764 + pax_open_kernel();
20765 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20766 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20767 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20768 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20769 + pax_close_kernel();
20770 +
20771 local_irq_restore(flags);
20772
20773 switch (return_code) {
20774 - case 0:
20775 - return address + entry;
20776 - case 0x80: /* Not present */
20777 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20778 - return 0;
20779 - default: /* Shouldn't happen */
20780 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20781 - service, return_code);
20782 + case 0: {
20783 + int cpu;
20784 + unsigned char flags;
20785 +
20786 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20787 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20788 + printk(KERN_WARNING "bios32_service: not valid\n");
20789 return 0;
20790 + }
20791 + address = address + PAGE_OFFSET;
20792 + length += 16UL; /* some BIOSs underreport this... */
20793 + flags = 4;
20794 + if (length >= 64*1024*1024) {
20795 + length >>= PAGE_SHIFT;
20796 + flags |= 8;
20797 + }
20798 +
20799 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20800 + gdt = get_cpu_gdt_table(cpu);
20801 + pack_descriptor(&d, address, length, 0x9b, flags);
20802 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20803 + pack_descriptor(&d, address, length, 0x93, flags);
20804 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20805 + }
20806 + return entry;
20807 + }
20808 + case 0x80: /* Not present */
20809 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20810 + return 0;
20811 + default: /* Shouldn't happen */
20812 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20813 + service, return_code);
20814 + return 0;
20815 }
20816 }
20817
20818 static struct {
20819 unsigned long address;
20820 unsigned short segment;
20821 -} pci_indirect = { 0, __KERNEL_CS };
20822 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20823
20824 -static int pci_bios_present;
20825 +static int pci_bios_present __read_only;
20826
20827 static int __devinit check_pcibios(void)
20828 {
20829 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20830 unsigned long flags, pcibios_entry;
20831
20832 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20833 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20834 + pci_indirect.address = pcibios_entry;
20835
20836 local_irq_save(flags);
20837 - __asm__(
20838 - "lcall *(%%edi); cld\n\t"
20839 + __asm__("movw %w6, %%ds\n\t"
20840 + "lcall *%%ss:(%%edi); cld\n\t"
20841 + "push %%ss\n\t"
20842 + "pop %%ds\n\t"
20843 "jc 1f\n\t"
20844 "xor %%ah, %%ah\n"
20845 "1:"
20846 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20847 "=b" (ebx),
20848 "=c" (ecx)
20849 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20850 - "D" (&pci_indirect)
20851 + "D" (&pci_indirect),
20852 + "r" (__PCIBIOS_DS)
20853 : "memory");
20854 local_irq_restore(flags);
20855
20856 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20857
20858 switch (len) {
20859 case 1:
20860 - __asm__("lcall *(%%esi); cld\n\t"
20861 + __asm__("movw %w6, %%ds\n\t"
20862 + "lcall *%%ss:(%%esi); cld\n\t"
20863 + "push %%ss\n\t"
20864 + "pop %%ds\n\t"
20865 "jc 1f\n\t"
20866 "xor %%ah, %%ah\n"
20867 "1:"
20868 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20869 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20870 "b" (bx),
20871 "D" ((long)reg),
20872 - "S" (&pci_indirect));
20873 + "S" (&pci_indirect),
20874 + "r" (__PCIBIOS_DS));
20875 /*
20876 * Zero-extend the result beyond 8 bits, do not trust the
20877 * BIOS having done it:
20878 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20879 *value &= 0xff;
20880 break;
20881 case 2:
20882 - __asm__("lcall *(%%esi); cld\n\t"
20883 + __asm__("movw %w6, %%ds\n\t"
20884 + "lcall *%%ss:(%%esi); cld\n\t"
20885 + "push %%ss\n\t"
20886 + "pop %%ds\n\t"
20887 "jc 1f\n\t"
20888 "xor %%ah, %%ah\n"
20889 "1:"
20890 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20891 : "1" (PCIBIOS_READ_CONFIG_WORD),
20892 "b" (bx),
20893 "D" ((long)reg),
20894 - "S" (&pci_indirect));
20895 + "S" (&pci_indirect),
20896 + "r" (__PCIBIOS_DS));
20897 /*
20898 * Zero-extend the result beyond 16 bits, do not trust the
20899 * BIOS having done it:
20900 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20901 *value &= 0xffff;
20902 break;
20903 case 4:
20904 - __asm__("lcall *(%%esi); cld\n\t"
20905 + __asm__("movw %w6, %%ds\n\t"
20906 + "lcall *%%ss:(%%esi); cld\n\t"
20907 + "push %%ss\n\t"
20908 + "pop %%ds\n\t"
20909 "jc 1f\n\t"
20910 "xor %%ah, %%ah\n"
20911 "1:"
20912 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20913 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20914 "b" (bx),
20915 "D" ((long)reg),
20916 - "S" (&pci_indirect));
20917 + "S" (&pci_indirect),
20918 + "r" (__PCIBIOS_DS));
20919 break;
20920 }
20921
20922 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20923
20924 switch (len) {
20925 case 1:
20926 - __asm__("lcall *(%%esi); cld\n\t"
20927 + __asm__("movw %w6, %%ds\n\t"
20928 + "lcall *%%ss:(%%esi); cld\n\t"
20929 + "push %%ss\n\t"
20930 + "pop %%ds\n\t"
20931 "jc 1f\n\t"
20932 "xor %%ah, %%ah\n"
20933 "1:"
20934 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20935 "c" (value),
20936 "b" (bx),
20937 "D" ((long)reg),
20938 - "S" (&pci_indirect));
20939 + "S" (&pci_indirect),
20940 + "r" (__PCIBIOS_DS));
20941 break;
20942 case 2:
20943 - __asm__("lcall *(%%esi); cld\n\t"
20944 + __asm__("movw %w6, %%ds\n\t"
20945 + "lcall *%%ss:(%%esi); cld\n\t"
20946 + "push %%ss\n\t"
20947 + "pop %%ds\n\t"
20948 "jc 1f\n\t"
20949 "xor %%ah, %%ah\n"
20950 "1:"
20951 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20952 "c" (value),
20953 "b" (bx),
20954 "D" ((long)reg),
20955 - "S" (&pci_indirect));
20956 + "S" (&pci_indirect),
20957 + "r" (__PCIBIOS_DS));
20958 break;
20959 case 4:
20960 - __asm__("lcall *(%%esi); cld\n\t"
20961 + __asm__("movw %w6, %%ds\n\t"
20962 + "lcall *%%ss:(%%esi); cld\n\t"
20963 + "push %%ss\n\t"
20964 + "pop %%ds\n\t"
20965 "jc 1f\n\t"
20966 "xor %%ah, %%ah\n"
20967 "1:"
20968 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20969 "c" (value),
20970 "b" (bx),
20971 "D" ((long)reg),
20972 - "S" (&pci_indirect));
20973 + "S" (&pci_indirect),
20974 + "r" (__PCIBIOS_DS));
20975 break;
20976 }
20977
20978 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20979
20980 DBG("PCI: Fetching IRQ routing table... ");
20981 __asm__("push %%es\n\t"
20982 + "movw %w8, %%ds\n\t"
20983 "push %%ds\n\t"
20984 "pop %%es\n\t"
20985 - "lcall *(%%esi); cld\n\t"
20986 + "lcall *%%ss:(%%esi); cld\n\t"
20987 "pop %%es\n\t"
20988 + "push %%ss\n\t"
20989 + "pop %%ds\n"
20990 "jc 1f\n\t"
20991 "xor %%ah, %%ah\n"
20992 "1:"
20993 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20994 "1" (0),
20995 "D" ((long) &opt),
20996 "S" (&pci_indirect),
20997 - "m" (opt)
20998 + "m" (opt),
20999 + "r" (__PCIBIOS_DS)
21000 : "memory");
21001 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21002 if (ret & 0xff00)
21003 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21004 {
21005 int ret;
21006
21007 - __asm__("lcall *(%%esi); cld\n\t"
21008 + __asm__("movw %w5, %%ds\n\t"
21009 + "lcall *%%ss:(%%esi); cld\n\t"
21010 + "push %%ss\n\t"
21011 + "pop %%ds\n"
21012 "jc 1f\n\t"
21013 "xor %%ah, %%ah\n"
21014 "1:"
21015 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21016 : "0" (PCIBIOS_SET_PCI_HW_INT),
21017 "b" ((dev->bus->number << 8) | dev->devfn),
21018 "c" ((irq << 8) | (pin + 10)),
21019 - "S" (&pci_indirect));
21020 + "S" (&pci_indirect),
21021 + "r" (__PCIBIOS_DS));
21022 return !(ret & 0xff00);
21023 }
21024 EXPORT_SYMBOL(pcibios_set_irq_routing);
21025 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_32.c linux-2.6.39.4/arch/x86/platform/efi/efi_32.c
21026 --- linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400
21027 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-08-05 19:44:35.000000000 -0400
21028 @@ -38,70 +38,37 @@
21029 */
21030
21031 static unsigned long efi_rt_eflags;
21032 -static pgd_t efi_bak_pg_dir_pointer[2];
21033 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21034
21035 -void efi_call_phys_prelog(void)
21036 +void __init efi_call_phys_prelog(void)
21037 {
21038 - unsigned long cr4;
21039 - unsigned long temp;
21040 struct desc_ptr gdt_descr;
21041
21042 local_irq_save(efi_rt_eflags);
21043
21044 - /*
21045 - * If I don't have PAE, I should just duplicate two entries in page
21046 - * directory. If I have PAE, I just need to duplicate one entry in
21047 - * page directory.
21048 - */
21049 - cr4 = read_cr4_safe();
21050 -
21051 - if (cr4 & X86_CR4_PAE) {
21052 - efi_bak_pg_dir_pointer[0].pgd =
21053 - swapper_pg_dir[pgd_index(0)].pgd;
21054 - swapper_pg_dir[0].pgd =
21055 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21056 - } else {
21057 - efi_bak_pg_dir_pointer[0].pgd =
21058 - swapper_pg_dir[pgd_index(0)].pgd;
21059 - efi_bak_pg_dir_pointer[1].pgd =
21060 - swapper_pg_dir[pgd_index(0x400000)].pgd;
21061 - swapper_pg_dir[pgd_index(0)].pgd =
21062 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21063 - temp = PAGE_OFFSET + 0x400000;
21064 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21065 - swapper_pg_dir[pgd_index(temp)].pgd;
21066 - }
21067 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21068 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21069 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21070
21071 /*
21072 * After the lock is released, the original page table is restored.
21073 */
21074 __flush_tlb_all();
21075
21076 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
21077 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
21078 gdt_descr.size = GDT_SIZE - 1;
21079 load_gdt(&gdt_descr);
21080 }
21081
21082 -void efi_call_phys_epilog(void)
21083 +void __init efi_call_phys_epilog(void)
21084 {
21085 - unsigned long cr4;
21086 struct desc_ptr gdt_descr;
21087
21088 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21089 + gdt_descr.address = get_cpu_gdt_table(0);
21090 gdt_descr.size = GDT_SIZE - 1;
21091 load_gdt(&gdt_descr);
21092
21093 - cr4 = read_cr4_safe();
21094 -
21095 - if (cr4 & X86_CR4_PAE) {
21096 - swapper_pg_dir[pgd_index(0)].pgd =
21097 - efi_bak_pg_dir_pointer[0].pgd;
21098 - } else {
21099 - swapper_pg_dir[pgd_index(0)].pgd =
21100 - efi_bak_pg_dir_pointer[0].pgd;
21101 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21102 - efi_bak_pg_dir_pointer[1].pgd;
21103 - }
21104 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21105
21106 /*
21107 * After the lock is released, the original page table is restored.
21108 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S
21109 --- linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400
21110 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-05 19:44:35.000000000 -0400
21111 @@ -6,6 +6,7 @@
21112 */
21113
21114 #include <linux/linkage.h>
21115 +#include <linux/init.h>
21116 #include <asm/page_types.h>
21117
21118 /*
21119 @@ -20,7 +21,7 @@
21120 * service functions will comply with gcc calling convention, too.
21121 */
21122
21123 -.text
21124 +__INIT
21125 ENTRY(efi_call_phys)
21126 /*
21127 * 0. The function can only be called in Linux kernel. So CS has been
21128 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
21129 * The mapping of lower virtual memory has been created in prelog and
21130 * epilog.
21131 */
21132 - movl $1f, %edx
21133 - subl $__PAGE_OFFSET, %edx
21134 - jmp *%edx
21135 + jmp 1f-__PAGE_OFFSET
21136 1:
21137
21138 /*
21139 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21140 * parameter 2, ..., param n. To make things easy, we save the return
21141 * address of efi_call_phys in a global variable.
21142 */
21143 - popl %edx
21144 - movl %edx, saved_return_addr
21145 - /* get the function pointer into ECX*/
21146 - popl %ecx
21147 - movl %ecx, efi_rt_function_ptr
21148 - movl $2f, %edx
21149 - subl $__PAGE_OFFSET, %edx
21150 - pushl %edx
21151 + popl (saved_return_addr)
21152 + popl (efi_rt_function_ptr)
21153
21154 /*
21155 * 3. Clear PG bit in %CR0.
21156 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21157 /*
21158 * 5. Call the physical function.
21159 */
21160 - jmp *%ecx
21161 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21162
21163 -2:
21164 /*
21165 * 6. After EFI runtime service returns, control will return to
21166 * following instruction. We'd better readjust stack pointer first.
21167 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21168 movl %cr0, %edx
21169 orl $0x80000000, %edx
21170 movl %edx, %cr0
21171 - jmp 1f
21172 -1:
21173 +
21174 /*
21175 * 8. Now restore the virtual mode from flat mode by
21176 * adding EIP with PAGE_OFFSET.
21177 */
21178 - movl $1f, %edx
21179 - jmp *%edx
21180 + jmp 1f+__PAGE_OFFSET
21181 1:
21182
21183 /*
21184 * 9. Balance the stack. And because EAX contain the return value,
21185 * we'd better not clobber it.
21186 */
21187 - leal efi_rt_function_ptr, %edx
21188 - movl (%edx), %ecx
21189 - pushl %ecx
21190 + pushl (efi_rt_function_ptr)
21191
21192 /*
21193 - * 10. Push the saved return address onto the stack and return.
21194 + * 10. Return to the saved return address.
21195 */
21196 - leal saved_return_addr, %edx
21197 - movl (%edx), %ecx
21198 - pushl %ecx
21199 - ret
21200 + jmpl *(saved_return_addr)
21201 ENDPROC(efi_call_phys)
21202 .previous
21203
21204 -.data
21205 +__INITDATA
21206 saved_return_addr:
21207 .long 0
21208 efi_rt_function_ptr:
21209 diff -urNp linux-2.6.39.4/arch/x86/platform/mrst/mrst.c linux-2.6.39.4/arch/x86/platform/mrst/mrst.c
21210 --- linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-05-19 00:06:34.000000000 -0400
21211 +++ linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-08-05 20:34:06.000000000 -0400
21212 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21213 }
21214
21215 /* Reboot and power off are handled by the SCU on a MID device */
21216 -static void mrst_power_off(void)
21217 +static __noreturn void mrst_power_off(void)
21218 {
21219 intel_scu_ipc_simple_command(0xf1, 1);
21220 + BUG();
21221 }
21222
21223 -static void mrst_reboot(void)
21224 +static __noreturn void mrst_reboot(void)
21225 {
21226 intel_scu_ipc_simple_command(0xf1, 0);
21227 + BUG();
21228 }
21229
21230 /*
21231 diff -urNp linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c
21232 --- linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400
21233 +++ linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-08-05 19:44:35.000000000 -0400
21234 @@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau
21235 cpumask_t mask;
21236 struct reset_args reset_args;
21237
21238 + pax_track_stack();
21239 +
21240 reset_args.sender = sender;
21241
21242 cpus_clear(mask);
21243 diff -urNp linux-2.6.39.4/arch/x86/power/cpu.c linux-2.6.39.4/arch/x86/power/cpu.c
21244 --- linux-2.6.39.4/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400
21245 +++ linux-2.6.39.4/arch/x86/power/cpu.c 2011-08-05 19:44:35.000000000 -0400
21246 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21247 static void fix_processor_context(void)
21248 {
21249 int cpu = smp_processor_id();
21250 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21251 + struct tss_struct *t = init_tss + cpu;
21252
21253 set_tss_desc(cpu, t); /*
21254 * This just modifies memory; should not be
21255 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21256 */
21257
21258 #ifdef CONFIG_X86_64
21259 + pax_open_kernel();
21260 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21261 + pax_close_kernel();
21262
21263 syscall_init(); /* This sets MSR_*STAR and related */
21264 #endif
21265 Binary files linux-2.6.39.4/arch/x86/tools/test_get_len and linux-2.6.39.4/arch/x86/tools/test_get_len differ
21266 diff -urNp linux-2.6.39.4/arch/x86/vdso/Makefile linux-2.6.39.4/arch/x86/vdso/Makefile
21267 --- linux-2.6.39.4/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400
21268 +++ linux-2.6.39.4/arch/x86/vdso/Makefile 2011-08-05 19:44:35.000000000 -0400
21269 @@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@
21270 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21271 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21272
21273 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21274 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21275 GCOV_PROFILE := n
21276
21277 #
21278 diff -urNp linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c
21279 --- linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400
21280 +++ linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-08-05 19:44:35.000000000 -0400
21281 @@ -22,24 +22,48 @@
21282 #include <asm/hpet.h>
21283 #include <asm/unistd.h>
21284 #include <asm/io.h>
21285 +#include <asm/fixmap.h>
21286 #include "vextern.h"
21287
21288 #define gtod vdso_vsyscall_gtod_data
21289
21290 +notrace noinline long __vdso_fallback_time(long *t)
21291 +{
21292 + long secs;
21293 + asm volatile("syscall"
21294 + : "=a" (secs)
21295 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
21296 + return secs;
21297 +}
21298 +
21299 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
21300 {
21301 long ret;
21302 asm("syscall" : "=a" (ret) :
21303 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
21304 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
21305 return ret;
21306 }
21307
21308 +notrace static inline cycle_t __vdso_vread_hpet(void)
21309 +{
21310 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
21311 +}
21312 +
21313 +notrace static inline cycle_t __vdso_vread_tsc(void)
21314 +{
21315 + cycle_t ret = (cycle_t)vget_cycles();
21316 +
21317 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
21318 +}
21319 +
21320 notrace static inline long vgetns(void)
21321 {
21322 long v;
21323 - cycles_t (*vread)(void);
21324 - vread = gtod->clock.vread;
21325 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
21326 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
21327 + v = __vdso_vread_tsc();
21328 + else
21329 + v = __vdso_vread_hpet();
21330 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
21331 return (v * gtod->clock.mult) >> gtod->clock.shift;
21332 }
21333
21334 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
21335
21336 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
21337 {
21338 - if (likely(gtod->sysctl_enabled))
21339 + if (likely(gtod->sysctl_enabled &&
21340 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21341 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21342 switch (clock) {
21343 case CLOCK_REALTIME:
21344 if (likely(gtod->clock.vread))
21345 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
21346 int clock_gettime(clockid_t, struct timespec *)
21347 __attribute__((weak, alias("__vdso_clock_gettime")));
21348
21349 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21350 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
21351 {
21352 long ret;
21353 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
21354 + asm("syscall" : "=a" (ret) :
21355 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
21356 + return ret;
21357 +}
21358 +
21359 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21360 +{
21361 + if (likely(gtod->sysctl_enabled &&
21362 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21363 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21364 + {
21365 if (likely(tv != NULL)) {
21366 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
21367 offsetof(struct timespec, tv_nsec) ||
21368 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
21369 }
21370 return 0;
21371 }
21372 - asm("syscall" : "=a" (ret) :
21373 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
21374 - return ret;
21375 + return __vdso_fallback_gettimeofday(tv, tz);
21376 }
21377 int gettimeofday(struct timeval *, struct timezone *)
21378 __attribute__((weak, alias("__vdso_gettimeofday")));
21379 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c
21380 --- linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400
21381 +++ linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-08-05 19:44:35.000000000 -0400
21382 @@ -25,6 +25,7 @@
21383 #include <asm/tlbflush.h>
21384 #include <asm/vdso.h>
21385 #include <asm/proto.h>
21386 +#include <asm/mman.h>
21387
21388 enum {
21389 VDSO_DISABLED = 0,
21390 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21391 void enable_sep_cpu(void)
21392 {
21393 int cpu = get_cpu();
21394 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21395 + struct tss_struct *tss = init_tss + cpu;
21396
21397 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21398 put_cpu();
21399 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21400 gate_vma.vm_start = FIXADDR_USER_START;
21401 gate_vma.vm_end = FIXADDR_USER_END;
21402 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21403 - gate_vma.vm_page_prot = __P101;
21404 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21405 /*
21406 * Make sure the vDSO gets into every core dump.
21407 * Dumping its contents makes post-mortem fully interpretable later
21408 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21409 if (compat)
21410 addr = VDSO_HIGH_BASE;
21411 else {
21412 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21413 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21414 if (IS_ERR_VALUE(addr)) {
21415 ret = addr;
21416 goto up_fail;
21417 }
21418 }
21419
21420 - current->mm->context.vdso = (void *)addr;
21421 + current->mm->context.vdso = addr;
21422
21423 if (compat_uses_vma || !compat) {
21424 /*
21425 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21426 }
21427
21428 current_thread_info()->sysenter_return =
21429 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21430 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21431
21432 up_fail:
21433 if (ret)
21434 - current->mm->context.vdso = NULL;
21435 + current->mm->context.vdso = 0;
21436
21437 up_write(&mm->mmap_sem);
21438
21439 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21440
21441 const char *arch_vma_name(struct vm_area_struct *vma)
21442 {
21443 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21444 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21445 return "[vdso]";
21446 +
21447 +#ifdef CONFIG_PAX_SEGMEXEC
21448 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21449 + return "[vdso]";
21450 +#endif
21451 +
21452 return NULL;
21453 }
21454
21455 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21456 * Check to see if the corresponding task was created in compat vdso
21457 * mode.
21458 */
21459 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21460 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21461 return &gate_vma;
21462 return NULL;
21463 }
21464 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso.lds.S linux-2.6.39.4/arch/x86/vdso/vdso.lds.S
21465 --- linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400
21466 +++ linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-08-05 19:44:35.000000000 -0400
21467 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
21468 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
21469 #include "vextern.h"
21470 #undef VEXTERN
21471 +
21472 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
21473 +VEXTERN(fallback_gettimeofday)
21474 +VEXTERN(fallback_time)
21475 +VEXTERN(getcpu)
21476 +#undef VEXTERN
21477 diff -urNp linux-2.6.39.4/arch/x86/vdso/vextern.h linux-2.6.39.4/arch/x86/vdso/vextern.h
21478 --- linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400
21479 +++ linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-08-05 19:44:35.000000000 -0400
21480 @@ -11,6 +11,5 @@
21481 put into vextern.h and be referenced as a pointer with vdso prefix.
21482 The main kernel later fills in the values. */
21483
21484 -VEXTERN(jiffies)
21485 VEXTERN(vgetcpu_mode)
21486 VEXTERN(vsyscall_gtod_data)
21487 diff -urNp linux-2.6.39.4/arch/x86/vdso/vma.c linux-2.6.39.4/arch/x86/vdso/vma.c
21488 --- linux-2.6.39.4/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400
21489 +++ linux-2.6.39.4/arch/x86/vdso/vma.c 2011-08-05 19:44:35.000000000 -0400
21490 @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
21491 if (!vbase)
21492 goto oom;
21493
21494 - if (memcmp(vbase, "\177ELF", 4)) {
21495 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
21496 printk("VDSO: I'm broken; not ELF\n");
21497 vdso_enabled = 0;
21498 }
21499 @@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l
21500 goto up_fail;
21501 }
21502
21503 - current->mm->context.vdso = (void *)addr;
21504 + current->mm->context.vdso = addr;
21505
21506 ret = install_special_mapping(mm, addr, vdso_size,
21507 VM_READ|VM_EXEC|
21508 @@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
21509 VM_ALWAYSDUMP,
21510 vdso_pages);
21511 if (ret) {
21512 - current->mm->context.vdso = NULL;
21513 + current->mm->context.vdso = 0;
21514 goto up_fail;
21515 }
21516
21517 @@ -134,10 +134,3 @@ up_fail:
21518 up_write(&mm->mmap_sem);
21519 return ret;
21520 }
21521 -
21522 -static __init int vdso_setup(char *s)
21523 -{
21524 - vdso_enabled = simple_strtoul(s, NULL, 0);
21525 - return 0;
21526 -}
21527 -__setup("vdso=", vdso_setup);
21528 diff -urNp linux-2.6.39.4/arch/x86/xen/enlighten.c linux-2.6.39.4/arch/x86/xen/enlighten.c
21529 --- linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400
21530 +++ linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-08-05 19:44:35.000000000 -0400
21531 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21532
21533 struct shared_info xen_dummy_shared_info;
21534
21535 -void *xen_initial_gdt;
21536 -
21537 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21538 __read_mostly int xen_have_vector_callback;
21539 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21540 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21541 #endif
21542 };
21543
21544 -static void xen_reboot(int reason)
21545 +static __noreturn void xen_reboot(int reason)
21546 {
21547 struct sched_shutdown r = { .reason = reason };
21548
21549 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21550 BUG();
21551 }
21552
21553 -static void xen_restart(char *msg)
21554 +static __noreturn void xen_restart(char *msg)
21555 {
21556 xen_reboot(SHUTDOWN_reboot);
21557 }
21558
21559 -static void xen_emergency_restart(void)
21560 +static __noreturn void xen_emergency_restart(void)
21561 {
21562 xen_reboot(SHUTDOWN_reboot);
21563 }
21564
21565 -static void xen_machine_halt(void)
21566 +static __noreturn void xen_machine_halt(void)
21567 {
21568 xen_reboot(SHUTDOWN_poweroff);
21569 }
21570 @@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel(
21571 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21572
21573 /* Work out if we support NX */
21574 - x86_configure_nx();
21575 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21576 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21577 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21578 + unsigned l, h;
21579 +
21580 + __supported_pte_mask |= _PAGE_NX;
21581 + rdmsr(MSR_EFER, l, h);
21582 + l |= EFER_NX;
21583 + wrmsr(MSR_EFER, l, h);
21584 + }
21585 +#endif
21586
21587 xen_setup_features();
21588
21589 @@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel(
21590
21591 machine_ops = xen_machine_ops;
21592
21593 - /*
21594 - * The only reliable way to retain the initial address of the
21595 - * percpu gdt_page is to remember it here, so we can go and
21596 - * mark it RW later, when the initial percpu area is freed.
21597 - */
21598 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21599 -
21600 xen_smp_init();
21601
21602 #ifdef CONFIG_ACPI_NUMA
21603 diff -urNp linux-2.6.39.4/arch/x86/xen/mmu.c linux-2.6.39.4/arch/x86/xen/mmu.c
21604 --- linux-2.6.39.4/arch/x86/xen/mmu.c 2011-07-09 09:18:51.000000000 -0400
21605 +++ linux-2.6.39.4/arch/x86/xen/mmu.c 2011-08-05 19:44:35.000000000 -0400
21606 @@ -1801,6 +1801,8 @@ __init pgd_t *xen_setup_kernel_pagetable
21607 convert_pfn_mfn(init_level4_pgt);
21608 convert_pfn_mfn(level3_ident_pgt);
21609 convert_pfn_mfn(level3_kernel_pgt);
21610 + convert_pfn_mfn(level3_vmalloc_pgt);
21611 + convert_pfn_mfn(level3_vmemmap_pgt);
21612
21613 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21614 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21615 @@ -1819,7 +1821,10 @@ __init pgd_t *xen_setup_kernel_pagetable
21616 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21617 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21618 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21619 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21620 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21621 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21622 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21623 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21624 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21625
21626 diff -urNp linux-2.6.39.4/arch/x86/xen/smp.c linux-2.6.39.4/arch/x86/xen/smp.c
21627 --- linux-2.6.39.4/arch/x86/xen/smp.c 2011-07-09 09:18:51.000000000 -0400
21628 +++ linux-2.6.39.4/arch/x86/xen/smp.c 2011-08-05 19:44:35.000000000 -0400
21629 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
21630 {
21631 BUG_ON(smp_processor_id() != 0);
21632 native_smp_prepare_boot_cpu();
21633 -
21634 - /* We've switched to the "real" per-cpu gdt, so make sure the
21635 - old memory can be recycled */
21636 - make_lowmem_page_readwrite(xen_initial_gdt);
21637 -
21638 xen_filter_cpu_maps();
21639 xen_setup_vcpu_info_placement();
21640 }
21641 @@ -266,12 +261,12 @@ cpu_initialize_context(unsigned int cpu,
21642 gdt = get_cpu_gdt_table(cpu);
21643
21644 ctxt->flags = VGCF_IN_KERNEL;
21645 - ctxt->user_regs.ds = __USER_DS;
21646 - ctxt->user_regs.es = __USER_DS;
21647 + ctxt->user_regs.ds = __KERNEL_DS;
21648 + ctxt->user_regs.es = __KERNEL_DS;
21649 ctxt->user_regs.ss = __KERNEL_DS;
21650 #ifdef CONFIG_X86_32
21651 ctxt->user_regs.fs = __KERNEL_PERCPU;
21652 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21653 + savesegment(gs, ctxt->user_regs.gs);
21654 #else
21655 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21656 #endif
21657 @@ -322,13 +317,12 @@ static int __cpuinit xen_cpu_up(unsigned
21658 int rc;
21659
21660 per_cpu(current_task, cpu) = idle;
21661 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21662 #ifdef CONFIG_X86_32
21663 irq_ctx_init(cpu);
21664 #else
21665 clear_tsk_thread_flag(idle, TIF_FORK);
21666 - per_cpu(kernel_stack, cpu) =
21667 - (unsigned long)task_stack_page(idle) -
21668 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21669 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21670 #endif
21671 xen_setup_runstate_info(cpu);
21672 xen_setup_timer(cpu);
21673 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-asm_32.S linux-2.6.39.4/arch/x86/xen/xen-asm_32.S
21674 --- linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400
21675 +++ linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-08-05 19:44:35.000000000 -0400
21676 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21677 ESP_OFFSET=4 # bytes pushed onto stack
21678
21679 /*
21680 - * Store vcpu_info pointer for easy access. Do it this way to
21681 - * avoid having to reload %fs
21682 + * Store vcpu_info pointer for easy access.
21683 */
21684 #ifdef CONFIG_SMP
21685 - GET_THREAD_INFO(%eax)
21686 - movl TI_cpu(%eax), %eax
21687 - movl __per_cpu_offset(,%eax,4), %eax
21688 - mov xen_vcpu(%eax), %eax
21689 + push %fs
21690 + mov $(__KERNEL_PERCPU), %eax
21691 + mov %eax, %fs
21692 + mov PER_CPU_VAR(xen_vcpu), %eax
21693 + pop %fs
21694 #else
21695 movl xen_vcpu, %eax
21696 #endif
21697 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-head.S linux-2.6.39.4/arch/x86/xen/xen-head.S
21698 --- linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400
21699 +++ linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-08-05 19:44:35.000000000 -0400
21700 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21701 #ifdef CONFIG_X86_32
21702 mov %esi,xen_start_info
21703 mov $init_thread_union+THREAD_SIZE,%esp
21704 +#ifdef CONFIG_SMP
21705 + movl $cpu_gdt_table,%edi
21706 + movl $__per_cpu_load,%eax
21707 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21708 + rorl $16,%eax
21709 + movb %al,__KERNEL_PERCPU + 4(%edi)
21710 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21711 + movl $__per_cpu_end - 1,%eax
21712 + subl $__per_cpu_start,%eax
21713 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21714 +#endif
21715 #else
21716 mov %rsi,xen_start_info
21717 mov $init_thread_union+THREAD_SIZE,%rsp
21718 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-ops.h linux-2.6.39.4/arch/x86/xen/xen-ops.h
21719 --- linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400
21720 +++ linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-08-05 19:44:35.000000000 -0400
21721 @@ -10,8 +10,6 @@
21722 extern const char xen_hypervisor_callback[];
21723 extern const char xen_failsafe_callback[];
21724
21725 -extern void *xen_initial_gdt;
21726 -
21727 struct trap_info;
21728 void xen_copy_trap_info(struct trap_info *traps);
21729
21730 diff -urNp linux-2.6.39.4/block/blk-iopoll.c linux-2.6.39.4/block/blk-iopoll.c
21731 --- linux-2.6.39.4/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400
21732 +++ linux-2.6.39.4/block/blk-iopoll.c 2011-08-05 19:44:35.000000000 -0400
21733 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21734 }
21735 EXPORT_SYMBOL(blk_iopoll_complete);
21736
21737 -static void blk_iopoll_softirq(struct softirq_action *h)
21738 +static void blk_iopoll_softirq(void)
21739 {
21740 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21741 int rearm = 0, budget = blk_iopoll_budget;
21742 diff -urNp linux-2.6.39.4/block/blk-map.c linux-2.6.39.4/block/blk-map.c
21743 --- linux-2.6.39.4/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
21744 +++ linux-2.6.39.4/block/blk-map.c 2011-08-05 19:44:35.000000000 -0400
21745 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21746 if (!len || !kbuf)
21747 return -EINVAL;
21748
21749 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21750 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21751 if (do_copy)
21752 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21753 else
21754 diff -urNp linux-2.6.39.4/block/blk-softirq.c linux-2.6.39.4/block/blk-softirq.c
21755 --- linux-2.6.39.4/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400
21756 +++ linux-2.6.39.4/block/blk-softirq.c 2011-08-05 19:44:35.000000000 -0400
21757 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21758 * Softirq action handler - move entries to local list and loop over them
21759 * while passing them to the queue registered handler.
21760 */
21761 -static void blk_done_softirq(struct softirq_action *h)
21762 +static void blk_done_softirq(void)
21763 {
21764 struct list_head *cpu_list, local_list;
21765
21766 diff -urNp linux-2.6.39.4/block/bsg.c linux-2.6.39.4/block/bsg.c
21767 --- linux-2.6.39.4/block/bsg.c 2011-05-19 00:06:34.000000000 -0400
21768 +++ linux-2.6.39.4/block/bsg.c 2011-08-05 19:44:35.000000000 -0400
21769 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21770 struct sg_io_v4 *hdr, struct bsg_device *bd,
21771 fmode_t has_write_perm)
21772 {
21773 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21774 + unsigned char *cmdptr;
21775 +
21776 if (hdr->request_len > BLK_MAX_CDB) {
21777 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21778 if (!rq->cmd)
21779 return -ENOMEM;
21780 - }
21781 + cmdptr = rq->cmd;
21782 + } else
21783 + cmdptr = tmpcmd;
21784
21785 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21786 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21787 hdr->request_len))
21788 return -EFAULT;
21789
21790 + if (cmdptr != rq->cmd)
21791 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21792 +
21793 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21794 if (blk_verify_command(rq->cmd, has_write_perm))
21795 return -EPERM;
21796 diff -urNp linux-2.6.39.4/block/scsi_ioctl.c linux-2.6.39.4/block/scsi_ioctl.c
21797 --- linux-2.6.39.4/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400
21798 +++ linux-2.6.39.4/block/scsi_ioctl.c 2011-08-05 19:44:35.000000000 -0400
21799 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21800 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21801 struct sg_io_hdr *hdr, fmode_t mode)
21802 {
21803 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21804 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21805 + unsigned char *cmdptr;
21806 +
21807 + if (rq->cmd != rq->__cmd)
21808 + cmdptr = rq->cmd;
21809 + else
21810 + cmdptr = tmpcmd;
21811 +
21812 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21813 return -EFAULT;
21814 +
21815 + if (cmdptr != rq->cmd)
21816 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21817 +
21818 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21819 return -EPERM;
21820
21821 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21822 int err;
21823 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21824 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21825 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21826 + unsigned char *cmdptr;
21827
21828 if (!sic)
21829 return -EINVAL;
21830 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21831 */
21832 err = -EFAULT;
21833 rq->cmd_len = cmdlen;
21834 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21835 +
21836 + if (rq->cmd != rq->__cmd)
21837 + cmdptr = rq->cmd;
21838 + else
21839 + cmdptr = tmpcmd;
21840 +
21841 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21842 goto error;
21843
21844 + if (rq->cmd != cmdptr)
21845 + memcpy(rq->cmd, cmdptr, cmdlen);
21846 +
21847 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21848 goto error;
21849
21850 diff -urNp linux-2.6.39.4/crypto/cryptd.c linux-2.6.39.4/crypto/cryptd.c
21851 --- linux-2.6.39.4/crypto/cryptd.c 2011-05-19 00:06:34.000000000 -0400
21852 +++ linux-2.6.39.4/crypto/cryptd.c 2011-08-05 20:34:06.000000000 -0400
21853 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21854
21855 struct cryptd_blkcipher_request_ctx {
21856 crypto_completion_t complete;
21857 -};
21858 +} __no_const;
21859
21860 struct cryptd_hash_ctx {
21861 struct crypto_shash *child;
21862 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21863
21864 struct cryptd_aead_request_ctx {
21865 crypto_completion_t complete;
21866 -};
21867 +} __no_const;
21868
21869 static void cryptd_queue_worker(struct work_struct *work);
21870
21871 diff -urNp linux-2.6.39.4/crypto/gf128mul.c linux-2.6.39.4/crypto/gf128mul.c
21872 --- linux-2.6.39.4/crypto/gf128mul.c 2011-05-19 00:06:34.000000000 -0400
21873 +++ linux-2.6.39.4/crypto/gf128mul.c 2011-08-05 19:44:35.000000000 -0400
21874 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21875 for (i = 0; i < 7; ++i)
21876 gf128mul_x_lle(&p[i + 1], &p[i]);
21877
21878 - memset(r, 0, sizeof(r));
21879 + memset(r, 0, sizeof(*r));
21880 for (i = 0;;) {
21881 u8 ch = ((u8 *)b)[15 - i];
21882
21883 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21884 for (i = 0; i < 7; ++i)
21885 gf128mul_x_bbe(&p[i + 1], &p[i]);
21886
21887 - memset(r, 0, sizeof(r));
21888 + memset(r, 0, sizeof(*r));
21889 for (i = 0;;) {
21890 u8 ch = ((u8 *)b)[i];
21891
21892 diff -urNp linux-2.6.39.4/crypto/serpent.c linux-2.6.39.4/crypto/serpent.c
21893 --- linux-2.6.39.4/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400
21894 +++ linux-2.6.39.4/crypto/serpent.c 2011-08-05 19:44:35.000000000 -0400
21895 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21896 u32 r0,r1,r2,r3,r4;
21897 int i;
21898
21899 + pax_track_stack();
21900 +
21901 /* Copy key, add padding */
21902
21903 for (i = 0; i < keylen; ++i)
21904 diff -urNp linux-2.6.39.4/Documentation/dontdiff linux-2.6.39.4/Documentation/dontdiff
21905 --- linux-2.6.39.4/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400
21906 +++ linux-2.6.39.4/Documentation/dontdiff 2011-08-05 19:44:35.000000000 -0400
21907 @@ -1,13 +1,16 @@
21908 *.a
21909 *.aux
21910 *.bin
21911 +*.cis
21912 *.cpio
21913 *.csp
21914 +*.dbg
21915 *.dsp
21916 *.dvi
21917 *.elf
21918 *.eps
21919 *.fw
21920 +*.gcno
21921 *.gen.S
21922 *.gif
21923 *.grep
21924 @@ -38,8 +41,10 @@
21925 *.tab.h
21926 *.tex
21927 *.ver
21928 +*.vim
21929 *.xml
21930 *_MODULES
21931 +*_reg_safe.h
21932 *_vga16.c
21933 *~
21934 *.9
21935 @@ -49,11 +54,16 @@
21936 53c700_d.h
21937 CVS
21938 ChangeSet
21939 +GPATH
21940 +GRTAGS
21941 +GSYMS
21942 +GTAGS
21943 Image
21944 Kerntypes
21945 Module.markers
21946 Module.symvers
21947 PENDING
21948 +PERF*
21949 SCCS
21950 System.map*
21951 TAGS
21952 @@ -80,8 +90,11 @@ btfixupprep
21953 build
21954 bvmlinux
21955 bzImage*
21956 +capability_names.h
21957 capflags.c
21958 classlist.h*
21959 +clut_vga16.c
21960 +common-cmds.h
21961 comp*.log
21962 compile.h*
21963 conf
21964 @@ -106,16 +119,19 @@ fore200e_mkfirm
21965 fore200e_pca_fw.c*
21966 gconf
21967 gen-devlist
21968 +gen-kdb_cmds.c
21969 gen_crc32table
21970 gen_init_cpio
21971 generated
21972 genheaders
21973 genksyms
21974 *_gray256.c
21975 +hash
21976 ihex2fw
21977 ikconfig.h*
21978 inat-tables.c
21979 initramfs_data.cpio
21980 +initramfs_data.cpio.bz2
21981 initramfs_data.cpio.gz
21982 initramfs_list
21983 int16.c
21984 @@ -125,7 +141,6 @@ int32.c
21985 int4.c
21986 int8.c
21987 kallsyms
21988 -kconfig
21989 keywords.c
21990 ksym.c*
21991 ksym.h*
21992 @@ -149,7 +164,9 @@ mkboot
21993 mkbugboot
21994 mkcpustr
21995 mkdep
21996 +mkpiggy
21997 mkprep
21998 +mkregtable
21999 mktables
22000 mktree
22001 modpost
22002 @@ -165,6 +182,7 @@ parse.h
22003 patches*
22004 pca200e.bin
22005 pca200e_ecd.bin2
22006 +perf-archive
22007 piggy.gz
22008 piggyback
22009 piggy.S
22010 @@ -180,7 +198,9 @@ r600_reg_safe.h
22011 raid6altivec*.c
22012 raid6int*.c
22013 raid6tables.c
22014 +regdb.c
22015 relocs
22016 +rlim_names.h
22017 rn50_reg_safe.h
22018 rs600_reg_safe.h
22019 rv515_reg_safe.h
22020 @@ -189,6 +209,7 @@ setup
22021 setup.bin
22022 setup.elf
22023 sImage
22024 +slabinfo
22025 sm_tbl*
22026 split-include
22027 syscalltab.h
22028 @@ -213,13 +234,17 @@ version.h*
22029 vmlinux
22030 vmlinux-*
22031 vmlinux.aout
22032 +vmlinux.bin.all
22033 +vmlinux.bin.bz2
22034 vmlinux.lds
22035 +vmlinux.relocs
22036 voffset.h
22037 vsyscall.lds
22038 vsyscall_32.lds
22039 wanxlfw.inc
22040 uImage
22041 unifdef
22042 +utsrelease.h
22043 wakeup.bin
22044 wakeup.elf
22045 wakeup.lds
22046 diff -urNp linux-2.6.39.4/Documentation/kernel-parameters.txt linux-2.6.39.4/Documentation/kernel-parameters.txt
22047 --- linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-06-25 12:55:22.000000000 -0400
22048 +++ linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-08-05 19:44:35.000000000 -0400
22049 @@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes
22050 the specified number of seconds. This is to be used if
22051 your oopses keep scrolling off the screen.
22052
22053 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22054 + virtualization environments that don't cope well with the
22055 + expand down segment used by UDEREF on X86-32 or the frequent
22056 + page table updates on X86-64.
22057 +
22058 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22059 +
22060 pcbit= [HW,ISDN]
22061
22062 pcd. [PARIDE]
22063 diff -urNp linux-2.6.39.4/drivers/acpi/apei/cper.c linux-2.6.39.4/drivers/acpi/apei/cper.c
22064 --- linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400
22065 +++ linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-08-05 19:44:35.000000000 -0400
22066 @@ -38,12 +38,12 @@
22067 */
22068 u64 cper_next_record_id(void)
22069 {
22070 - static atomic64_t seq;
22071 + static atomic64_unchecked_t seq;
22072
22073 - if (!atomic64_read(&seq))
22074 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
22075 + if (!atomic64_read_unchecked(&seq))
22076 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22077
22078 - return atomic64_inc_return(&seq);
22079 + return atomic64_inc_return_unchecked(&seq);
22080 }
22081 EXPORT_SYMBOL_GPL(cper_next_record_id);
22082
22083 diff -urNp linux-2.6.39.4/drivers/acpi/power_meter.c linux-2.6.39.4/drivers/acpi/power_meter.c
22084 --- linux-2.6.39.4/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400
22085 +++ linux-2.6.39.4/drivers/acpi/power_meter.c 2011-08-05 19:44:35.000000000 -0400
22086 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
22087 return res;
22088
22089 temp /= 1000;
22090 - if (temp < 0)
22091 - return -EINVAL;
22092
22093 mutex_lock(&resource->lock);
22094 resource->trip[attr->index - 7] = temp;
22095 diff -urNp linux-2.6.39.4/drivers/acpi/proc.c linux-2.6.39.4/drivers/acpi/proc.c
22096 --- linux-2.6.39.4/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400
22097 +++ linux-2.6.39.4/drivers/acpi/proc.c 2011-08-05 19:44:35.000000000 -0400
22098 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22099 size_t count, loff_t * ppos)
22100 {
22101 struct list_head *node, *next;
22102 - char strbuf[5];
22103 - char str[5] = "";
22104 - unsigned int len = count;
22105 -
22106 - if (len > 4)
22107 - len = 4;
22108 - if (len < 0)
22109 - return -EFAULT;
22110 + char strbuf[5] = {0};
22111
22112 - if (copy_from_user(strbuf, buffer, len))
22113 + if (count > 4)
22114 + count = 4;
22115 + if (copy_from_user(strbuf, buffer, count))
22116 return -EFAULT;
22117 - strbuf[len] = '\0';
22118 - sscanf(strbuf, "%s", str);
22119 + strbuf[count] = '\0';
22120
22121 mutex_lock(&acpi_device_lock);
22122 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22123 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22124 if (!dev->wakeup.flags.valid)
22125 continue;
22126
22127 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
22128 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22129 if (device_can_wakeup(&dev->dev)) {
22130 bool enable = !device_may_wakeup(&dev->dev);
22131 device_set_wakeup_enable(&dev->dev, enable);
22132 diff -urNp linux-2.6.39.4/drivers/acpi/processor_driver.c linux-2.6.39.4/drivers/acpi/processor_driver.c
22133 --- linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400
22134 +++ linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-08-05 19:44:35.000000000 -0400
22135 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22136 return 0;
22137 #endif
22138
22139 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22140 + BUG_ON(pr->id >= nr_cpu_ids);
22141
22142 /*
22143 * Buggy BIOS check
22144 diff -urNp linux-2.6.39.4/drivers/ata/libata-core.c linux-2.6.39.4/drivers/ata/libata-core.c
22145 --- linux-2.6.39.4/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400
22146 +++ linux-2.6.39.4/drivers/ata/libata-core.c 2011-08-05 20:34:06.000000000 -0400
22147 @@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd *
22148 struct ata_port *ap;
22149 unsigned int tag;
22150
22151 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22152 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22153 ap = qc->ap;
22154
22155 qc->flags = 0;
22156 @@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued
22157 struct ata_port *ap;
22158 struct ata_link *link;
22159
22160 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22161 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22162 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22163 ap = qc->ap;
22164 link = qc->dev->link;
22165 @@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct
22166 return;
22167
22168 spin_lock(&lock);
22169 + pax_open_kernel();
22170
22171 for (cur = ops->inherits; cur; cur = cur->inherits) {
22172 void **inherit = (void **)cur;
22173 @@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct
22174 if (IS_ERR(*pp))
22175 *pp = NULL;
22176
22177 - ops->inherits = NULL;
22178 + *(struct ata_port_operations **)&ops->inherits = NULL;
22179
22180 + pax_close_kernel();
22181 spin_unlock(&lock);
22182 }
22183
22184 diff -urNp linux-2.6.39.4/drivers/ata/libata-eh.c linux-2.6.39.4/drivers/ata/libata-eh.c
22185 --- linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:11:51.000000000 -0400
22186 +++ linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:12:20.000000000 -0400
22187 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22188 {
22189 struct ata_link *link;
22190
22191 + pax_track_stack();
22192 +
22193 ata_for_each_link(link, ap, HOST_FIRST)
22194 ata_eh_link_report(link);
22195 }
22196 diff -urNp linux-2.6.39.4/drivers/ata/pata_arasan_cf.c linux-2.6.39.4/drivers/ata/pata_arasan_cf.c
22197 --- linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400
22198 +++ linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-08-05 20:34:06.000000000 -0400
22199 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22200 /* Handle platform specific quirks */
22201 if (pdata->quirk) {
22202 if (pdata->quirk & CF_BROKEN_PIO) {
22203 - ap->ops->set_piomode = NULL;
22204 + pax_open_kernel();
22205 + *(void **)&ap->ops->set_piomode = NULL;
22206 + pax_close_kernel();
22207 ap->pio_mask = 0;
22208 }
22209 if (pdata->quirk & CF_BROKEN_MWDMA)
22210 diff -urNp linux-2.6.39.4/drivers/atm/adummy.c linux-2.6.39.4/drivers/atm/adummy.c
22211 --- linux-2.6.39.4/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400
22212 +++ linux-2.6.39.4/drivers/atm/adummy.c 2011-08-05 19:44:36.000000000 -0400
22213 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22214 vcc->pop(vcc, skb);
22215 else
22216 dev_kfree_skb_any(skb);
22217 - atomic_inc(&vcc->stats->tx);
22218 + atomic_inc_unchecked(&vcc->stats->tx);
22219
22220 return 0;
22221 }
22222 diff -urNp linux-2.6.39.4/drivers/atm/ambassador.c linux-2.6.39.4/drivers/atm/ambassador.c
22223 --- linux-2.6.39.4/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400
22224 +++ linux-2.6.39.4/drivers/atm/ambassador.c 2011-08-05 19:44:36.000000000 -0400
22225 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22226 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22227
22228 // VC layer stats
22229 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22230 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22231
22232 // free the descriptor
22233 kfree (tx_descr);
22234 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22235 dump_skb ("<<<", vc, skb);
22236
22237 // VC layer stats
22238 - atomic_inc(&atm_vcc->stats->rx);
22239 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22240 __net_timestamp(skb);
22241 // end of our responsibility
22242 atm_vcc->push (atm_vcc, skb);
22243 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22244 } else {
22245 PRINTK (KERN_INFO, "dropped over-size frame");
22246 // should we count this?
22247 - atomic_inc(&atm_vcc->stats->rx_drop);
22248 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22249 }
22250
22251 } else {
22252 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22253 }
22254
22255 if (check_area (skb->data, skb->len)) {
22256 - atomic_inc(&atm_vcc->stats->tx_err);
22257 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22258 return -ENOMEM; // ?
22259 }
22260
22261 diff -urNp linux-2.6.39.4/drivers/atm/atmtcp.c linux-2.6.39.4/drivers/atm/atmtcp.c
22262 --- linux-2.6.39.4/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400
22263 +++ linux-2.6.39.4/drivers/atm/atmtcp.c 2011-08-05 19:44:36.000000000 -0400
22264 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22265 if (vcc->pop) vcc->pop(vcc,skb);
22266 else dev_kfree_skb(skb);
22267 if (dev_data) return 0;
22268 - atomic_inc(&vcc->stats->tx_err);
22269 + atomic_inc_unchecked(&vcc->stats->tx_err);
22270 return -ENOLINK;
22271 }
22272 size = skb->len+sizeof(struct atmtcp_hdr);
22273 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22274 if (!new_skb) {
22275 if (vcc->pop) vcc->pop(vcc,skb);
22276 else dev_kfree_skb(skb);
22277 - atomic_inc(&vcc->stats->tx_err);
22278 + atomic_inc_unchecked(&vcc->stats->tx_err);
22279 return -ENOBUFS;
22280 }
22281 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22282 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22283 if (vcc->pop) vcc->pop(vcc,skb);
22284 else dev_kfree_skb(skb);
22285 out_vcc->push(out_vcc,new_skb);
22286 - atomic_inc(&vcc->stats->tx);
22287 - atomic_inc(&out_vcc->stats->rx);
22288 + atomic_inc_unchecked(&vcc->stats->tx);
22289 + atomic_inc_unchecked(&out_vcc->stats->rx);
22290 return 0;
22291 }
22292
22293 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22294 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22295 read_unlock(&vcc_sklist_lock);
22296 if (!out_vcc) {
22297 - atomic_inc(&vcc->stats->tx_err);
22298 + atomic_inc_unchecked(&vcc->stats->tx_err);
22299 goto done;
22300 }
22301 skb_pull(skb,sizeof(struct atmtcp_hdr));
22302 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22303 __net_timestamp(new_skb);
22304 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22305 out_vcc->push(out_vcc,new_skb);
22306 - atomic_inc(&vcc->stats->tx);
22307 - atomic_inc(&out_vcc->stats->rx);
22308 + atomic_inc_unchecked(&vcc->stats->tx);
22309 + atomic_inc_unchecked(&out_vcc->stats->rx);
22310 done:
22311 if (vcc->pop) vcc->pop(vcc,skb);
22312 else dev_kfree_skb(skb);
22313 diff -urNp linux-2.6.39.4/drivers/atm/eni.c linux-2.6.39.4/drivers/atm/eni.c
22314 --- linux-2.6.39.4/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400
22315 +++ linux-2.6.39.4/drivers/atm/eni.c 2011-08-05 19:44:36.000000000 -0400
22316 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22317 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22318 vcc->dev->number);
22319 length = 0;
22320 - atomic_inc(&vcc->stats->rx_err);
22321 + atomic_inc_unchecked(&vcc->stats->rx_err);
22322 }
22323 else {
22324 length = ATM_CELL_SIZE-1; /* no HEC */
22325 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22326 size);
22327 }
22328 eff = length = 0;
22329 - atomic_inc(&vcc->stats->rx_err);
22330 + atomic_inc_unchecked(&vcc->stats->rx_err);
22331 }
22332 else {
22333 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22334 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22335 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22336 vcc->dev->number,vcc->vci,length,size << 2,descr);
22337 length = eff = 0;
22338 - atomic_inc(&vcc->stats->rx_err);
22339 + atomic_inc_unchecked(&vcc->stats->rx_err);
22340 }
22341 }
22342 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22343 @@ -771,7 +771,7 @@ rx_dequeued++;
22344 vcc->push(vcc,skb);
22345 pushed++;
22346 }
22347 - atomic_inc(&vcc->stats->rx);
22348 + atomic_inc_unchecked(&vcc->stats->rx);
22349 }
22350 wake_up(&eni_dev->rx_wait);
22351 }
22352 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22353 PCI_DMA_TODEVICE);
22354 if (vcc->pop) vcc->pop(vcc,skb);
22355 else dev_kfree_skb_irq(skb);
22356 - atomic_inc(&vcc->stats->tx);
22357 + atomic_inc_unchecked(&vcc->stats->tx);
22358 wake_up(&eni_dev->tx_wait);
22359 dma_complete++;
22360 }
22361 diff -urNp linux-2.6.39.4/drivers/atm/firestream.c linux-2.6.39.4/drivers/atm/firestream.c
22362 --- linux-2.6.39.4/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400
22363 +++ linux-2.6.39.4/drivers/atm/firestream.c 2011-08-05 19:44:36.000000000 -0400
22364 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22365 }
22366 }
22367
22368 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22369 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22370
22371 fs_dprintk (FS_DEBUG_TXMEM, "i");
22372 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22373 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22374 #endif
22375 skb_put (skb, qe->p1 & 0xffff);
22376 ATM_SKB(skb)->vcc = atm_vcc;
22377 - atomic_inc(&atm_vcc->stats->rx);
22378 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22379 __net_timestamp(skb);
22380 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22381 atm_vcc->push (atm_vcc, skb);
22382 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22383 kfree (pe);
22384 }
22385 if (atm_vcc)
22386 - atomic_inc(&atm_vcc->stats->rx_drop);
22387 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22388 break;
22389 case 0x1f: /* Reassembly abort: no buffers. */
22390 /* Silently increment error counter. */
22391 if (atm_vcc)
22392 - atomic_inc(&atm_vcc->stats->rx_drop);
22393 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22394 break;
22395 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22396 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22397 diff -urNp linux-2.6.39.4/drivers/atm/fore200e.c linux-2.6.39.4/drivers/atm/fore200e.c
22398 --- linux-2.6.39.4/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400
22399 +++ linux-2.6.39.4/drivers/atm/fore200e.c 2011-08-05 19:44:36.000000000 -0400
22400 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22401 #endif
22402 /* check error condition */
22403 if (*entry->status & STATUS_ERROR)
22404 - atomic_inc(&vcc->stats->tx_err);
22405 + atomic_inc_unchecked(&vcc->stats->tx_err);
22406 else
22407 - atomic_inc(&vcc->stats->tx);
22408 + atomic_inc_unchecked(&vcc->stats->tx);
22409 }
22410 }
22411
22412 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22413 if (skb == NULL) {
22414 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22415
22416 - atomic_inc(&vcc->stats->rx_drop);
22417 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22418 return -ENOMEM;
22419 }
22420
22421 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22422
22423 dev_kfree_skb_any(skb);
22424
22425 - atomic_inc(&vcc->stats->rx_drop);
22426 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22427 return -ENOMEM;
22428 }
22429
22430 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22431
22432 vcc->push(vcc, skb);
22433 - atomic_inc(&vcc->stats->rx);
22434 + atomic_inc_unchecked(&vcc->stats->rx);
22435
22436 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22437
22438 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22439 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22440 fore200e->atm_dev->number,
22441 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22442 - atomic_inc(&vcc->stats->rx_err);
22443 + atomic_inc_unchecked(&vcc->stats->rx_err);
22444 }
22445 }
22446
22447 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22448 goto retry_here;
22449 }
22450
22451 - atomic_inc(&vcc->stats->tx_err);
22452 + atomic_inc_unchecked(&vcc->stats->tx_err);
22453
22454 fore200e->tx_sat++;
22455 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22456 diff -urNp linux-2.6.39.4/drivers/atm/he.c linux-2.6.39.4/drivers/atm/he.c
22457 --- linux-2.6.39.4/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400
22458 +++ linux-2.6.39.4/drivers/atm/he.c 2011-08-05 19:44:36.000000000 -0400
22459 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22460
22461 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22462 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22463 - atomic_inc(&vcc->stats->rx_drop);
22464 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22465 goto return_host_buffers;
22466 }
22467
22468 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22469 RBRQ_LEN_ERR(he_dev->rbrq_head)
22470 ? "LEN_ERR" : "",
22471 vcc->vpi, vcc->vci);
22472 - atomic_inc(&vcc->stats->rx_err);
22473 + atomic_inc_unchecked(&vcc->stats->rx_err);
22474 goto return_host_buffers;
22475 }
22476
22477 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22478 vcc->push(vcc, skb);
22479 spin_lock(&he_dev->global_lock);
22480
22481 - atomic_inc(&vcc->stats->rx);
22482 + atomic_inc_unchecked(&vcc->stats->rx);
22483
22484 return_host_buffers:
22485 ++pdus_assembled;
22486 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22487 tpd->vcc->pop(tpd->vcc, tpd->skb);
22488 else
22489 dev_kfree_skb_any(tpd->skb);
22490 - atomic_inc(&tpd->vcc->stats->tx_err);
22491 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22492 }
22493 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22494 return;
22495 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22496 vcc->pop(vcc, skb);
22497 else
22498 dev_kfree_skb_any(skb);
22499 - atomic_inc(&vcc->stats->tx_err);
22500 + atomic_inc_unchecked(&vcc->stats->tx_err);
22501 return -EINVAL;
22502 }
22503
22504 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22505 vcc->pop(vcc, skb);
22506 else
22507 dev_kfree_skb_any(skb);
22508 - atomic_inc(&vcc->stats->tx_err);
22509 + atomic_inc_unchecked(&vcc->stats->tx_err);
22510 return -EINVAL;
22511 }
22512 #endif
22513 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22514 vcc->pop(vcc, skb);
22515 else
22516 dev_kfree_skb_any(skb);
22517 - atomic_inc(&vcc->stats->tx_err);
22518 + atomic_inc_unchecked(&vcc->stats->tx_err);
22519 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22520 return -ENOMEM;
22521 }
22522 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22523 vcc->pop(vcc, skb);
22524 else
22525 dev_kfree_skb_any(skb);
22526 - atomic_inc(&vcc->stats->tx_err);
22527 + atomic_inc_unchecked(&vcc->stats->tx_err);
22528 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22529 return -ENOMEM;
22530 }
22531 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22532 __enqueue_tpd(he_dev, tpd, cid);
22533 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22534
22535 - atomic_inc(&vcc->stats->tx);
22536 + atomic_inc_unchecked(&vcc->stats->tx);
22537
22538 return 0;
22539 }
22540 diff -urNp linux-2.6.39.4/drivers/atm/horizon.c linux-2.6.39.4/drivers/atm/horizon.c
22541 --- linux-2.6.39.4/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400
22542 +++ linux-2.6.39.4/drivers/atm/horizon.c 2011-08-05 19:44:36.000000000 -0400
22543 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22544 {
22545 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22546 // VC layer stats
22547 - atomic_inc(&vcc->stats->rx);
22548 + atomic_inc_unchecked(&vcc->stats->rx);
22549 __net_timestamp(skb);
22550 // end of our responsibility
22551 vcc->push (vcc, skb);
22552 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22553 dev->tx_iovec = NULL;
22554
22555 // VC layer stats
22556 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22557 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22558
22559 // free the skb
22560 hrz_kfree_skb (skb);
22561 diff -urNp linux-2.6.39.4/drivers/atm/idt77252.c linux-2.6.39.4/drivers/atm/idt77252.c
22562 --- linux-2.6.39.4/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400
22563 +++ linux-2.6.39.4/drivers/atm/idt77252.c 2011-08-05 19:44:36.000000000 -0400
22564 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22565 else
22566 dev_kfree_skb(skb);
22567
22568 - atomic_inc(&vcc->stats->tx);
22569 + atomic_inc_unchecked(&vcc->stats->tx);
22570 }
22571
22572 atomic_dec(&scq->used);
22573 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22574 if ((sb = dev_alloc_skb(64)) == NULL) {
22575 printk("%s: Can't allocate buffers for aal0.\n",
22576 card->name);
22577 - atomic_add(i, &vcc->stats->rx_drop);
22578 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22579 break;
22580 }
22581 if (!atm_charge(vcc, sb->truesize)) {
22582 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22583 card->name);
22584 - atomic_add(i - 1, &vcc->stats->rx_drop);
22585 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22586 dev_kfree_skb(sb);
22587 break;
22588 }
22589 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22590 ATM_SKB(sb)->vcc = vcc;
22591 __net_timestamp(sb);
22592 vcc->push(vcc, sb);
22593 - atomic_inc(&vcc->stats->rx);
22594 + atomic_inc_unchecked(&vcc->stats->rx);
22595
22596 cell += ATM_CELL_PAYLOAD;
22597 }
22598 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22599 "(CDC: %08x)\n",
22600 card->name, len, rpp->len, readl(SAR_REG_CDC));
22601 recycle_rx_pool_skb(card, rpp);
22602 - atomic_inc(&vcc->stats->rx_err);
22603 + atomic_inc_unchecked(&vcc->stats->rx_err);
22604 return;
22605 }
22606 if (stat & SAR_RSQE_CRC) {
22607 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22608 recycle_rx_pool_skb(card, rpp);
22609 - atomic_inc(&vcc->stats->rx_err);
22610 + atomic_inc_unchecked(&vcc->stats->rx_err);
22611 return;
22612 }
22613 if (skb_queue_len(&rpp->queue) > 1) {
22614 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22615 RXPRINTK("%s: Can't alloc RX skb.\n",
22616 card->name);
22617 recycle_rx_pool_skb(card, rpp);
22618 - atomic_inc(&vcc->stats->rx_err);
22619 + atomic_inc_unchecked(&vcc->stats->rx_err);
22620 return;
22621 }
22622 if (!atm_charge(vcc, skb->truesize)) {
22623 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22624 __net_timestamp(skb);
22625
22626 vcc->push(vcc, skb);
22627 - atomic_inc(&vcc->stats->rx);
22628 + atomic_inc_unchecked(&vcc->stats->rx);
22629
22630 return;
22631 }
22632 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22633 __net_timestamp(skb);
22634
22635 vcc->push(vcc, skb);
22636 - atomic_inc(&vcc->stats->rx);
22637 + atomic_inc_unchecked(&vcc->stats->rx);
22638
22639 if (skb->truesize > SAR_FB_SIZE_3)
22640 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22641 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22642 if (vcc->qos.aal != ATM_AAL0) {
22643 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22644 card->name, vpi, vci);
22645 - atomic_inc(&vcc->stats->rx_drop);
22646 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22647 goto drop;
22648 }
22649
22650 if ((sb = dev_alloc_skb(64)) == NULL) {
22651 printk("%s: Can't allocate buffers for AAL0.\n",
22652 card->name);
22653 - atomic_inc(&vcc->stats->rx_err);
22654 + atomic_inc_unchecked(&vcc->stats->rx_err);
22655 goto drop;
22656 }
22657
22658 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22659 ATM_SKB(sb)->vcc = vcc;
22660 __net_timestamp(sb);
22661 vcc->push(vcc, sb);
22662 - atomic_inc(&vcc->stats->rx);
22663 + atomic_inc_unchecked(&vcc->stats->rx);
22664
22665 drop:
22666 skb_pull(queue, 64);
22667 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22668
22669 if (vc == NULL) {
22670 printk("%s: NULL connection in send().\n", card->name);
22671 - atomic_inc(&vcc->stats->tx_err);
22672 + atomic_inc_unchecked(&vcc->stats->tx_err);
22673 dev_kfree_skb(skb);
22674 return -EINVAL;
22675 }
22676 if (!test_bit(VCF_TX, &vc->flags)) {
22677 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22678 - atomic_inc(&vcc->stats->tx_err);
22679 + atomic_inc_unchecked(&vcc->stats->tx_err);
22680 dev_kfree_skb(skb);
22681 return -EINVAL;
22682 }
22683 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22684 break;
22685 default:
22686 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22687 - atomic_inc(&vcc->stats->tx_err);
22688 + atomic_inc_unchecked(&vcc->stats->tx_err);
22689 dev_kfree_skb(skb);
22690 return -EINVAL;
22691 }
22692
22693 if (skb_shinfo(skb)->nr_frags != 0) {
22694 printk("%s: No scatter-gather yet.\n", card->name);
22695 - atomic_inc(&vcc->stats->tx_err);
22696 + atomic_inc_unchecked(&vcc->stats->tx_err);
22697 dev_kfree_skb(skb);
22698 return -EINVAL;
22699 }
22700 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22701
22702 err = queue_skb(card, vc, skb, oam);
22703 if (err) {
22704 - atomic_inc(&vcc->stats->tx_err);
22705 + atomic_inc_unchecked(&vcc->stats->tx_err);
22706 dev_kfree_skb(skb);
22707 return err;
22708 }
22709 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22710 skb = dev_alloc_skb(64);
22711 if (!skb) {
22712 printk("%s: Out of memory in send_oam().\n", card->name);
22713 - atomic_inc(&vcc->stats->tx_err);
22714 + atomic_inc_unchecked(&vcc->stats->tx_err);
22715 return -ENOMEM;
22716 }
22717 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22718 diff -urNp linux-2.6.39.4/drivers/atm/iphase.c linux-2.6.39.4/drivers/atm/iphase.c
22719 --- linux-2.6.39.4/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400
22720 +++ linux-2.6.39.4/drivers/atm/iphase.c 2011-08-05 19:44:36.000000000 -0400
22721 @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22722 status = (u_short) (buf_desc_ptr->desc_mode);
22723 if (status & (RX_CER | RX_PTE | RX_OFL))
22724 {
22725 - atomic_inc(&vcc->stats->rx_err);
22726 + atomic_inc_unchecked(&vcc->stats->rx_err);
22727 IF_ERR(printk("IA: bad packet, dropping it");)
22728 if (status & RX_CER) {
22729 IF_ERR(printk(" cause: packet CRC error\n");)
22730 @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22731 len = dma_addr - buf_addr;
22732 if (len > iadev->rx_buf_sz) {
22733 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22734 - atomic_inc(&vcc->stats->rx_err);
22735 + atomic_inc_unchecked(&vcc->stats->rx_err);
22736 goto out_free_desc;
22737 }
22738
22739 @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
22740 ia_vcc = INPH_IA_VCC(vcc);
22741 if (ia_vcc == NULL)
22742 {
22743 - atomic_inc(&vcc->stats->rx_err);
22744 + atomic_inc_unchecked(&vcc->stats->rx_err);
22745 dev_kfree_skb_any(skb);
22746 atm_return(vcc, atm_guess_pdu2truesize(len));
22747 goto INCR_DLE;
22748 @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
22749 if ((length > iadev->rx_buf_sz) || (length >
22750 (skb->len - sizeof(struct cpcs_trailer))))
22751 {
22752 - atomic_inc(&vcc->stats->rx_err);
22753 + atomic_inc_unchecked(&vcc->stats->rx_err);
22754 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22755 length, skb->len);)
22756 dev_kfree_skb_any(skb);
22757 @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
22758
22759 IF_RX(printk("rx_dle_intr: skb push");)
22760 vcc->push(vcc,skb);
22761 - atomic_inc(&vcc->stats->rx);
22762 + atomic_inc_unchecked(&vcc->stats->rx);
22763 iadev->rx_pkt_cnt++;
22764 }
22765 INCR_DLE:
22766 @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
22767 {
22768 struct k_sonet_stats *stats;
22769 stats = &PRIV(_ia_dev[board])->sonet_stats;
22770 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22771 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22772 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22773 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22774 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22775 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22776 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22777 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22778 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22779 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22780 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22781 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22782 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22783 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22784 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22785 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22786 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22787 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22788 }
22789 ia_cmds.status = 0;
22790 break;
22791 @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22792 if ((desc == 0) || (desc > iadev->num_tx_desc))
22793 {
22794 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22795 - atomic_inc(&vcc->stats->tx);
22796 + atomic_inc_unchecked(&vcc->stats->tx);
22797 if (vcc->pop)
22798 vcc->pop(vcc, skb);
22799 else
22800 @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22801 ATM_DESC(skb) = vcc->vci;
22802 skb_queue_tail(&iadev->tx_dma_q, skb);
22803
22804 - atomic_inc(&vcc->stats->tx);
22805 + atomic_inc_unchecked(&vcc->stats->tx);
22806 iadev->tx_pkt_cnt++;
22807 /* Increment transaction counter */
22808 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22809
22810 #if 0
22811 /* add flow control logic */
22812 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22813 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22814 if (iavcc->vc_desc_cnt > 10) {
22815 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22816 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22817 diff -urNp linux-2.6.39.4/drivers/atm/lanai.c linux-2.6.39.4/drivers/atm/lanai.c
22818 --- linux-2.6.39.4/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400
22819 +++ linux-2.6.39.4/drivers/atm/lanai.c 2011-08-05 19:44:36.000000000 -0400
22820 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22821 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22822 lanai_endtx(lanai, lvcc);
22823 lanai_free_skb(lvcc->tx.atmvcc, skb);
22824 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22825 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22826 }
22827
22828 /* Try to fill the buffer - don't call unless there is backlog */
22829 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22830 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22831 __net_timestamp(skb);
22832 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22833 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22834 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22835 out:
22836 lvcc->rx.buf.ptr = end;
22837 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22838 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22839 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22840 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22841 lanai->stats.service_rxnotaal5++;
22842 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22843 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22844 return 0;
22845 }
22846 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22847 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22848 int bytes;
22849 read_unlock(&vcc_sklist_lock);
22850 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22851 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22852 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22853 lvcc->stats.x.aal5.service_trash++;
22854 bytes = (SERVICE_GET_END(s) * 16) -
22855 (((unsigned long) lvcc->rx.buf.ptr) -
22856 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22857 }
22858 if (s & SERVICE_STREAM) {
22859 read_unlock(&vcc_sklist_lock);
22860 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22861 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22862 lvcc->stats.x.aal5.service_stream++;
22863 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22864 "PDU on VCI %d!\n", lanai->number, vci);
22865 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22866 return 0;
22867 }
22868 DPRINTK("got rx crc error on vci %d\n", vci);
22869 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22870 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22871 lvcc->stats.x.aal5.service_rxcrc++;
22872 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22873 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22874 diff -urNp linux-2.6.39.4/drivers/atm/nicstar.c linux-2.6.39.4/drivers/atm/nicstar.c
22875 --- linux-2.6.39.4/drivers/atm/nicstar.c 2011-05-19 00:06:34.000000000 -0400
22876 +++ linux-2.6.39.4/drivers/atm/nicstar.c 2011-08-05 19:44:36.000000000 -0400
22877 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22878 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22879 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22880 card->index);
22881 - atomic_inc(&vcc->stats->tx_err);
22882 + atomic_inc_unchecked(&vcc->stats->tx_err);
22883 dev_kfree_skb_any(skb);
22884 return -EINVAL;
22885 }
22886 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22887 if (!vc->tx) {
22888 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22889 card->index);
22890 - atomic_inc(&vcc->stats->tx_err);
22891 + atomic_inc_unchecked(&vcc->stats->tx_err);
22892 dev_kfree_skb_any(skb);
22893 return -EINVAL;
22894 }
22895 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22896 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22897 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22898 card->index);
22899 - atomic_inc(&vcc->stats->tx_err);
22900 + atomic_inc_unchecked(&vcc->stats->tx_err);
22901 dev_kfree_skb_any(skb);
22902 return -EINVAL;
22903 }
22904
22905 if (skb_shinfo(skb)->nr_frags != 0) {
22906 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22907 - atomic_inc(&vcc->stats->tx_err);
22908 + atomic_inc_unchecked(&vcc->stats->tx_err);
22909 dev_kfree_skb_any(skb);
22910 return -EINVAL;
22911 }
22912 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22913 }
22914
22915 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22916 - atomic_inc(&vcc->stats->tx_err);
22917 + atomic_inc_unchecked(&vcc->stats->tx_err);
22918 dev_kfree_skb_any(skb);
22919 return -EIO;
22920 }
22921 - atomic_inc(&vcc->stats->tx);
22922 + atomic_inc_unchecked(&vcc->stats->tx);
22923
22924 return 0;
22925 }
22926 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22927 printk
22928 ("nicstar%d: Can't allocate buffers for aal0.\n",
22929 card->index);
22930 - atomic_add(i, &vcc->stats->rx_drop);
22931 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22932 break;
22933 }
22934 if (!atm_charge(vcc, sb->truesize)) {
22935 RXPRINTK
22936 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22937 card->index);
22938 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22939 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22940 dev_kfree_skb_any(sb);
22941 break;
22942 }
22943 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22944 ATM_SKB(sb)->vcc = vcc;
22945 __net_timestamp(sb);
22946 vcc->push(vcc, sb);
22947 - atomic_inc(&vcc->stats->rx);
22948 + atomic_inc_unchecked(&vcc->stats->rx);
22949 cell += ATM_CELL_PAYLOAD;
22950 }
22951
22952 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22953 if (iovb == NULL) {
22954 printk("nicstar%d: Out of iovec buffers.\n",
22955 card->index);
22956 - atomic_inc(&vcc->stats->rx_drop);
22957 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22958 recycle_rx_buf(card, skb);
22959 return;
22960 }
22961 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22962 small or large buffer itself. */
22963 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22964 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22965 - atomic_inc(&vcc->stats->rx_err);
22966 + atomic_inc_unchecked(&vcc->stats->rx_err);
22967 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22968 NS_MAX_IOVECS);
22969 NS_PRV_IOVCNT(iovb) = 0;
22970 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22971 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22972 card->index);
22973 which_list(card, skb);
22974 - atomic_inc(&vcc->stats->rx_err);
22975 + atomic_inc_unchecked(&vcc->stats->rx_err);
22976 recycle_rx_buf(card, skb);
22977 vc->rx_iov = NULL;
22978 recycle_iov_buf(card, iovb);
22979 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22980 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22981 card->index);
22982 which_list(card, skb);
22983 - atomic_inc(&vcc->stats->rx_err);
22984 + atomic_inc_unchecked(&vcc->stats->rx_err);
22985 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22986 NS_PRV_IOVCNT(iovb));
22987 vc->rx_iov = NULL;
22988 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22989 printk(" - PDU size mismatch.\n");
22990 else
22991 printk(".\n");
22992 - atomic_inc(&vcc->stats->rx_err);
22993 + atomic_inc_unchecked(&vcc->stats->rx_err);
22994 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22995 NS_PRV_IOVCNT(iovb));
22996 vc->rx_iov = NULL;
22997 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22998 /* skb points to a small buffer */
22999 if (!atm_charge(vcc, skb->truesize)) {
23000 push_rxbufs(card, skb);
23001 - atomic_inc(&vcc->stats->rx_drop);
23002 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23003 } else {
23004 skb_put(skb, len);
23005 dequeue_sm_buf(card, skb);
23006 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23007 ATM_SKB(skb)->vcc = vcc;
23008 __net_timestamp(skb);
23009 vcc->push(vcc, skb);
23010 - atomic_inc(&vcc->stats->rx);
23011 + atomic_inc_unchecked(&vcc->stats->rx);
23012 }
23013 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23014 struct sk_buff *sb;
23015 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23016 if (len <= NS_SMBUFSIZE) {
23017 if (!atm_charge(vcc, sb->truesize)) {
23018 push_rxbufs(card, sb);
23019 - atomic_inc(&vcc->stats->rx_drop);
23020 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23021 } else {
23022 skb_put(sb, len);
23023 dequeue_sm_buf(card, sb);
23024 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23025 ATM_SKB(sb)->vcc = vcc;
23026 __net_timestamp(sb);
23027 vcc->push(vcc, sb);
23028 - atomic_inc(&vcc->stats->rx);
23029 + atomic_inc_unchecked(&vcc->stats->rx);
23030 }
23031
23032 push_rxbufs(card, skb);
23033 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23034
23035 if (!atm_charge(vcc, skb->truesize)) {
23036 push_rxbufs(card, skb);
23037 - atomic_inc(&vcc->stats->rx_drop);
23038 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23039 } else {
23040 dequeue_lg_buf(card, skb);
23041 #ifdef NS_USE_DESTRUCTORS
23042 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23043 ATM_SKB(skb)->vcc = vcc;
23044 __net_timestamp(skb);
23045 vcc->push(vcc, skb);
23046 - atomic_inc(&vcc->stats->rx);
23047 + atomic_inc_unchecked(&vcc->stats->rx);
23048 }
23049
23050 push_rxbufs(card, sb);
23051 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23052 printk
23053 ("nicstar%d: Out of huge buffers.\n",
23054 card->index);
23055 - atomic_inc(&vcc->stats->rx_drop);
23056 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23057 recycle_iovec_rx_bufs(card,
23058 (struct iovec *)
23059 iovb->data,
23060 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23061 card->hbpool.count++;
23062 } else
23063 dev_kfree_skb_any(hb);
23064 - atomic_inc(&vcc->stats->rx_drop);
23065 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23066 } else {
23067 /* Copy the small buffer to the huge buffer */
23068 sb = (struct sk_buff *)iov->iov_base;
23069 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23070 #endif /* NS_USE_DESTRUCTORS */
23071 __net_timestamp(hb);
23072 vcc->push(vcc, hb);
23073 - atomic_inc(&vcc->stats->rx);
23074 + atomic_inc_unchecked(&vcc->stats->rx);
23075 }
23076 }
23077
23078 diff -urNp linux-2.6.39.4/drivers/atm/solos-pci.c linux-2.6.39.4/drivers/atm/solos-pci.c
23079 --- linux-2.6.39.4/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400
23080 +++ linux-2.6.39.4/drivers/atm/solos-pci.c 2011-08-05 19:44:36.000000000 -0400
23081 @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
23082 }
23083 atm_charge(vcc, skb->truesize);
23084 vcc->push(vcc, skb);
23085 - atomic_inc(&vcc->stats->rx);
23086 + atomic_inc_unchecked(&vcc->stats->rx);
23087 break;
23088
23089 case PKT_STATUS:
23090 @@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff *
23091 char msg[500];
23092 char item[10];
23093
23094 + pax_track_stack();
23095 +
23096 len = buf->len;
23097 for (i = 0; i < len; i++){
23098 if(i % 8 == 0)
23099 @@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car
23100 vcc = SKB_CB(oldskb)->vcc;
23101
23102 if (vcc) {
23103 - atomic_inc(&vcc->stats->tx);
23104 + atomic_inc_unchecked(&vcc->stats->tx);
23105 solos_pop(vcc, oldskb);
23106 } else
23107 dev_kfree_skb_irq(oldskb);
23108 diff -urNp linux-2.6.39.4/drivers/atm/suni.c linux-2.6.39.4/drivers/atm/suni.c
23109 --- linux-2.6.39.4/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400
23110 +++ linux-2.6.39.4/drivers/atm/suni.c 2011-08-05 19:44:36.000000000 -0400
23111 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23112
23113
23114 #define ADD_LIMITED(s,v) \
23115 - atomic_add((v),&stats->s); \
23116 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23117 + atomic_add_unchecked((v),&stats->s); \
23118 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23119
23120
23121 static void suni_hz(unsigned long from_timer)
23122 diff -urNp linux-2.6.39.4/drivers/atm/uPD98402.c linux-2.6.39.4/drivers/atm/uPD98402.c
23123 --- linux-2.6.39.4/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400
23124 +++ linux-2.6.39.4/drivers/atm/uPD98402.c 2011-08-05 19:44:36.000000000 -0400
23125 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23126 struct sonet_stats tmp;
23127 int error = 0;
23128
23129 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23130 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23131 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23132 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23133 if (zero && !error) {
23134 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23135
23136
23137 #define ADD_LIMITED(s,v) \
23138 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23139 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23140 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23141 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23142 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23143 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23144
23145
23146 static void stat_event(struct atm_dev *dev)
23147 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23148 if (reason & uPD98402_INT_PFM) stat_event(dev);
23149 if (reason & uPD98402_INT_PCO) {
23150 (void) GET(PCOCR); /* clear interrupt cause */
23151 - atomic_add(GET(HECCT),
23152 + atomic_add_unchecked(GET(HECCT),
23153 &PRIV(dev)->sonet_stats.uncorr_hcs);
23154 }
23155 if ((reason & uPD98402_INT_RFO) &&
23156 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23157 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23158 uPD98402_INT_LOS),PIMR); /* enable them */
23159 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23160 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23161 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23162 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23163 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23164 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23165 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23166 return 0;
23167 }
23168
23169 diff -urNp linux-2.6.39.4/drivers/atm/zatm.c linux-2.6.39.4/drivers/atm/zatm.c
23170 --- linux-2.6.39.4/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400
23171 +++ linux-2.6.39.4/drivers/atm/zatm.c 2011-08-05 19:44:36.000000000 -0400
23172 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23173 }
23174 if (!size) {
23175 dev_kfree_skb_irq(skb);
23176 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23177 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23178 continue;
23179 }
23180 if (!atm_charge(vcc,skb->truesize)) {
23181 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23182 skb->len = size;
23183 ATM_SKB(skb)->vcc = vcc;
23184 vcc->push(vcc,skb);
23185 - atomic_inc(&vcc->stats->rx);
23186 + atomic_inc_unchecked(&vcc->stats->rx);
23187 }
23188 zout(pos & 0xffff,MTA(mbx));
23189 #if 0 /* probably a stupid idea */
23190 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23191 skb_queue_head(&zatm_vcc->backlog,skb);
23192 break;
23193 }
23194 - atomic_inc(&vcc->stats->tx);
23195 + atomic_inc_unchecked(&vcc->stats->tx);
23196 wake_up(&zatm_vcc->tx_wait);
23197 }
23198
23199 diff -urNp linux-2.6.39.4/drivers/base/power/wakeup.c linux-2.6.39.4/drivers/base/power/wakeup.c
23200 --- linux-2.6.39.4/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400
23201 +++ linux-2.6.39.4/drivers/base/power/wakeup.c 2011-08-05 19:44:36.000000000 -0400
23202 @@ -29,14 +29,14 @@ bool events_check_enabled;
23203 * They need to be modified together atomically, so it's better to use one
23204 * atomic variable to hold them both.
23205 */
23206 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23207 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23208
23209 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23210 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23211
23212 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23213 {
23214 - unsigned int comb = atomic_read(&combined_event_count);
23215 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23216
23217 *cnt = (comb >> IN_PROGRESS_BITS);
23218 *inpr = comb & MAX_IN_PROGRESS;
23219 @@ -351,7 +351,7 @@ static void wakeup_source_activate(struc
23220 ws->last_time = ktime_get();
23221
23222 /* Increment the counter of events in progress. */
23223 - atomic_inc(&combined_event_count);
23224 + atomic_inc_unchecked(&combined_event_count);
23225 }
23226
23227 /**
23228 @@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str
23229 * Increment the counter of registered wakeup events and decrement the
23230 * couter of wakeup events in progress simultaneously.
23231 */
23232 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23233 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23234 }
23235
23236 /**
23237 diff -urNp linux-2.6.39.4/drivers/block/cciss.c linux-2.6.39.4/drivers/block/cciss.c
23238 --- linux-2.6.39.4/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400
23239 +++ linux-2.6.39.4/drivers/block/cciss.c 2011-08-05 20:34:06.000000000 -0400
23240 @@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct
23241 int err;
23242 u32 cp;
23243
23244 + memset(&arg64, 0, sizeof(arg64));
23245 +
23246 err = 0;
23247 err |=
23248 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23249 @@ -2933,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
23250 while (!list_empty(&h->reqQ)) {
23251 c = list_entry(h->reqQ.next, CommandList_struct, list);
23252 /* can't do anything if fifo is full */
23253 - if ((h->access.fifo_full(h))) {
23254 + if ((h->access->fifo_full(h))) {
23255 dev_warn(&h->pdev->dev, "fifo full\n");
23256 break;
23257 }
23258 @@ -2943,7 +2945,7 @@ static void start_io(ctlr_info_t *h)
23259 h->Qdepth--;
23260
23261 /* Tell the controller execute command */
23262 - h->access.submit_command(h, c);
23263 + h->access->submit_command(h, c);
23264
23265 /* Put job onto the completed Q */
23266 addQ(&h->cmpQ, c);
23267 @@ -3369,17 +3371,17 @@ startio:
23268
23269 static inline unsigned long get_next_completion(ctlr_info_t *h)
23270 {
23271 - return h->access.command_completed(h);
23272 + return h->access->command_completed(h);
23273 }
23274
23275 static inline int interrupt_pending(ctlr_info_t *h)
23276 {
23277 - return h->access.intr_pending(h);
23278 + return h->access->intr_pending(h);
23279 }
23280
23281 static inline long interrupt_not_for_us(ctlr_info_t *h)
23282 {
23283 - return ((h->access.intr_pending(h) == 0) ||
23284 + return ((h->access->intr_pending(h) == 0) ||
23285 (h->interrupts_enabled == 0));
23286 }
23287
23288 @@ -3412,7 +3414,7 @@ static inline u32 next_command(ctlr_info
23289 u32 a;
23290
23291 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23292 - return h->access.command_completed(h);
23293 + return h->access->command_completed(h);
23294
23295 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23296 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23297 @@ -3910,7 +3912,7 @@ static void __devinit cciss_put_controll
23298 trans_support & CFGTBL_Trans_use_short_tags);
23299
23300 /* Change the access methods to the performant access methods */
23301 - h->access = SA5_performant_access;
23302 + h->access = &SA5_performant_access;
23303 h->transMethod = CFGTBL_Trans_Performant;
23304
23305 return;
23306 @@ -4179,7 +4181,7 @@ static int __devinit cciss_pci_init(ctlr
23307 if (prod_index < 0)
23308 return -ENODEV;
23309 h->product_name = products[prod_index].product_name;
23310 - h->access = *(products[prod_index].access);
23311 + h->access = products[prod_index].access;
23312
23313 if (cciss_board_disabled(h)) {
23314 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23315 @@ -4661,7 +4663,7 @@ static int __devinit cciss_init_one(stru
23316 }
23317
23318 /* make sure the board interrupts are off */
23319 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23320 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23321 if (h->msi_vector || h->msix_vector) {
23322 if (request_irq(h->intr[PERF_MODE_INT],
23323 do_cciss_msix_intr,
23324 @@ -4744,7 +4746,7 @@ static int __devinit cciss_init_one(stru
23325 cciss_scsi_setup(h);
23326
23327 /* Turn the interrupts on so we can service requests */
23328 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23329 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23330
23331 /* Get the firmware version */
23332 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23333 @@ -4828,7 +4830,7 @@ static void cciss_shutdown(struct pci_de
23334 kfree(flush_buf);
23335 if (return_code != IO_OK)
23336 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23337 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23338 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23339 free_irq(h->intr[PERF_MODE_INT], h);
23340 }
23341
23342 diff -urNp linux-2.6.39.4/drivers/block/cciss.h linux-2.6.39.4/drivers/block/cciss.h
23343 --- linux-2.6.39.4/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400
23344 +++ linux-2.6.39.4/drivers/block/cciss.h 2011-08-05 20:34:06.000000000 -0400
23345 @@ -100,7 +100,7 @@ struct ctlr_info
23346 /* information about each logical volume */
23347 drive_info_struct *drv[CISS_MAX_LUN];
23348
23349 - struct access_method access;
23350 + struct access_method *access;
23351
23352 /* queue and queue Info */
23353 struct list_head reqQ;
23354 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.c linux-2.6.39.4/drivers/block/cpqarray.c
23355 --- linux-2.6.39.4/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400
23356 +++ linux-2.6.39.4/drivers/block/cpqarray.c 2011-08-05 20:34:06.000000000 -0400
23357 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23358 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23359 goto Enomem4;
23360 }
23361 - hba[i]->access.set_intr_mask(hba[i], 0);
23362 + hba[i]->access->set_intr_mask(hba[i], 0);
23363 if (request_irq(hba[i]->intr, do_ida_intr,
23364 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23365 {
23366 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23367 add_timer(&hba[i]->timer);
23368
23369 /* Enable IRQ now that spinlock and rate limit timer are set up */
23370 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23371 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23372
23373 for(j=0; j<NWD; j++) {
23374 struct gendisk *disk = ida_gendisk[i][j];
23375 @@ -694,7 +694,7 @@ DBGINFO(
23376 for(i=0; i<NR_PRODUCTS; i++) {
23377 if (board_id == products[i].board_id) {
23378 c->product_name = products[i].product_name;
23379 - c->access = *(products[i].access);
23380 + c->access = products[i].access;
23381 break;
23382 }
23383 }
23384 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23385 hba[ctlr]->intr = intr;
23386 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23387 hba[ctlr]->product_name = products[j].product_name;
23388 - hba[ctlr]->access = *(products[j].access);
23389 + hba[ctlr]->access = products[j].access;
23390 hba[ctlr]->ctlr = ctlr;
23391 hba[ctlr]->board_id = board_id;
23392 hba[ctlr]->pci_dev = NULL; /* not PCI */
23393 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23394 struct scatterlist tmp_sg[SG_MAX];
23395 int i, dir, seg;
23396
23397 + pax_track_stack();
23398 +
23399 queue_next:
23400 creq = blk_peek_request(q);
23401 if (!creq)
23402 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23403
23404 while((c = h->reqQ) != NULL) {
23405 /* Can't do anything if we're busy */
23406 - if (h->access.fifo_full(h) == 0)
23407 + if (h->access->fifo_full(h) == 0)
23408 return;
23409
23410 /* Get the first entry from the request Q */
23411 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23412 h->Qdepth--;
23413
23414 /* Tell the controller to do our bidding */
23415 - h->access.submit_command(h, c);
23416 + h->access->submit_command(h, c);
23417
23418 /* Get onto the completion Q */
23419 addQ(&h->cmpQ, c);
23420 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23421 unsigned long flags;
23422 __u32 a,a1;
23423
23424 - istat = h->access.intr_pending(h);
23425 + istat = h->access->intr_pending(h);
23426 /* Is this interrupt for us? */
23427 if (istat == 0)
23428 return IRQ_NONE;
23429 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23430 */
23431 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23432 if (istat & FIFO_NOT_EMPTY) {
23433 - while((a = h->access.command_completed(h))) {
23434 + while((a = h->access->command_completed(h))) {
23435 a1 = a; a &= ~3;
23436 if ((c = h->cmpQ) == NULL)
23437 {
23438 @@ -1449,11 +1451,11 @@ static int sendcmd(
23439 /*
23440 * Disable interrupt
23441 */
23442 - info_p->access.set_intr_mask(info_p, 0);
23443 + info_p->access->set_intr_mask(info_p, 0);
23444 /* Make sure there is room in the command FIFO */
23445 /* Actually it should be completely empty at this time. */
23446 for (i = 200000; i > 0; i--) {
23447 - temp = info_p->access.fifo_full(info_p);
23448 + temp = info_p->access->fifo_full(info_p);
23449 if (temp != 0) {
23450 break;
23451 }
23452 @@ -1466,7 +1468,7 @@ DBG(
23453 /*
23454 * Send the cmd
23455 */
23456 - info_p->access.submit_command(info_p, c);
23457 + info_p->access->submit_command(info_p, c);
23458 complete = pollcomplete(ctlr);
23459
23460 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23461 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23462 * we check the new geometry. Then turn interrupts back on when
23463 * we're done.
23464 */
23465 - host->access.set_intr_mask(host, 0);
23466 + host->access->set_intr_mask(host, 0);
23467 getgeometry(ctlr);
23468 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23469 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23470
23471 for(i=0; i<NWD; i++) {
23472 struct gendisk *disk = ida_gendisk[ctlr][i];
23473 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23474 /* Wait (up to 2 seconds) for a command to complete */
23475
23476 for (i = 200000; i > 0; i--) {
23477 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23478 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23479 if (done == 0) {
23480 udelay(10); /* a short fixed delay */
23481 } else
23482 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.h linux-2.6.39.4/drivers/block/cpqarray.h
23483 --- linux-2.6.39.4/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400
23484 +++ linux-2.6.39.4/drivers/block/cpqarray.h 2011-08-05 20:34:06.000000000 -0400
23485 @@ -99,7 +99,7 @@ struct ctlr_info {
23486 drv_info_t drv[NWD];
23487 struct proc_dir_entry *proc;
23488
23489 - struct access_method access;
23490 + struct access_method *access;
23491
23492 cmdlist_t *reqQ;
23493 cmdlist_t *cmpQ;
23494 diff -urNp linux-2.6.39.4/drivers/block/DAC960.c linux-2.6.39.4/drivers/block/DAC960.c
23495 --- linux-2.6.39.4/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400
23496 +++ linux-2.6.39.4/drivers/block/DAC960.c 2011-08-05 19:44:36.000000000 -0400
23497 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23498 unsigned long flags;
23499 int Channel, TargetID;
23500
23501 + pax_track_stack();
23502 +
23503 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23504 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23505 sizeof(DAC960_SCSI_Inquiry_T) +
23506 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_int.h linux-2.6.39.4/drivers/block/drbd/drbd_int.h
23507 --- linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400
23508 +++ linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-08-05 19:44:36.000000000 -0400
23509 @@ -736,7 +736,7 @@ struct drbd_request;
23510 struct drbd_epoch {
23511 struct list_head list;
23512 unsigned int barrier_nr;
23513 - atomic_t epoch_size; /* increased on every request added. */
23514 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23515 atomic_t active; /* increased on every req. added, and dec on every finished. */
23516 unsigned long flags;
23517 };
23518 @@ -1108,7 +1108,7 @@ struct drbd_conf {
23519 void *int_dig_in;
23520 void *int_dig_vv;
23521 wait_queue_head_t seq_wait;
23522 - atomic_t packet_seq;
23523 + atomic_unchecked_t packet_seq;
23524 unsigned int peer_seq;
23525 spinlock_t peer_seq_lock;
23526 unsigned int minor;
23527 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_main.c linux-2.6.39.4/drivers/block/drbd/drbd_main.c
23528 --- linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400
23529 +++ linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-08-05 19:44:36.000000000 -0400
23530 @@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co
23531 p.sector = sector;
23532 p.block_id = block_id;
23533 p.blksize = blksize;
23534 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23535 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23536
23537 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23538 return false;
23539 @@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m
23540 p.sector = cpu_to_be64(req->sector);
23541 p.block_id = (unsigned long)req;
23542 p.seq_num = cpu_to_be32(req->seq_num =
23543 - atomic_add_return(1, &mdev->packet_seq));
23544 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23545
23546 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23547
23548 @@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_
23549 atomic_set(&mdev->unacked_cnt, 0);
23550 atomic_set(&mdev->local_cnt, 0);
23551 atomic_set(&mdev->net_cnt, 0);
23552 - atomic_set(&mdev->packet_seq, 0);
23553 + atomic_set_unchecked(&mdev->packet_seq, 0);
23554 atomic_set(&mdev->pp_in_use, 0);
23555 atomic_set(&mdev->pp_in_use_by_net, 0);
23556 atomic_set(&mdev->rs_sect_in, 0);
23557 @@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23558 mdev->receiver.t_state);
23559
23560 /* no need to lock it, I'm the only thread alive */
23561 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23562 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23563 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23564 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23565 mdev->al_writ_cnt =
23566 mdev->bm_writ_cnt =
23567 mdev->read_cnt =
23568 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_nl.c linux-2.6.39.4/drivers/block/drbd/drbd_nl.c
23569 --- linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400
23570 +++ linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-08-05 19:44:36.000000000 -0400
23571 @@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru
23572 module_put(THIS_MODULE);
23573 }
23574
23575 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23576 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23577
23578 static unsigned short *
23579 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23580 @@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf *
23581 cn_reply->id.idx = CN_IDX_DRBD;
23582 cn_reply->id.val = CN_VAL_DRBD;
23583
23584 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23585 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23586 cn_reply->ack = 0; /* not used here. */
23587 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23588 (int)((char *)tl - (char *)reply->tag_list);
23589 @@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23590 cn_reply->id.idx = CN_IDX_DRBD;
23591 cn_reply->id.val = CN_VAL_DRBD;
23592
23593 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23594 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23595 cn_reply->ack = 0; /* not used here. */
23596 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23597 (int)((char *)tl - (char *)reply->tag_list);
23598 @@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23599 cn_reply->id.idx = CN_IDX_DRBD;
23600 cn_reply->id.val = CN_VAL_DRBD;
23601
23602 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23603 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23604 cn_reply->ack = 0; // not used here.
23605 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23606 (int)((char*)tl - (char*)reply->tag_list);
23607 @@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb
23608 cn_reply->id.idx = CN_IDX_DRBD;
23609 cn_reply->id.val = CN_VAL_DRBD;
23610
23611 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23612 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23613 cn_reply->ack = 0; /* not used here. */
23614 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23615 (int)((char *)tl - (char *)reply->tag_list);
23616 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c
23617 --- linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400
23618 +++ linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-08-05 19:44:36.000000000 -0400
23619 @@ -894,7 +894,7 @@ retry:
23620 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23621 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23622
23623 - atomic_set(&mdev->packet_seq, 0);
23624 + atomic_set_unchecked(&mdev->packet_seq, 0);
23625 mdev->peer_seq = 0;
23626
23627 drbd_thread_start(&mdev->asender);
23628 @@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish
23629 do {
23630 next_epoch = NULL;
23631
23632 - epoch_size = atomic_read(&epoch->epoch_size);
23633 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23634
23635 switch (ev & ~EV_CLEANUP) {
23636 case EV_PUT:
23637 @@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish
23638 rv = FE_DESTROYED;
23639 } else {
23640 epoch->flags = 0;
23641 - atomic_set(&epoch->epoch_size, 0);
23642 + atomic_set_unchecked(&epoch->epoch_size, 0);
23643 /* atomic_set(&epoch->active, 0); is already zero */
23644 if (rv == FE_STILL_LIVE)
23645 rv = FE_RECYCLED;
23646 @@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c
23647 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23648 drbd_flush(mdev);
23649
23650 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23651 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23652 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23653 if (epoch)
23654 break;
23655 }
23656
23657 epoch = mdev->current_epoch;
23658 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23659 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23660
23661 D_ASSERT(atomic_read(&epoch->active) == 0);
23662 D_ASSERT(epoch->flags == 0);
23663 @@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c
23664 }
23665
23666 epoch->flags = 0;
23667 - atomic_set(&epoch->epoch_size, 0);
23668 + atomic_set_unchecked(&epoch->epoch_size, 0);
23669 atomic_set(&epoch->active, 0);
23670
23671 spin_lock(&mdev->epoch_lock);
23672 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23673 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23674 list_add(&epoch->list, &mdev->current_epoch->list);
23675 mdev->current_epoch = epoch;
23676 mdev->epochs++;
23677 @@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf
23678 spin_unlock(&mdev->peer_seq_lock);
23679
23680 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23681 - atomic_inc(&mdev->current_epoch->epoch_size);
23682 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23683 return drbd_drain_block(mdev, data_size);
23684 }
23685
23686 @@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf
23687
23688 spin_lock(&mdev->epoch_lock);
23689 e->epoch = mdev->current_epoch;
23690 - atomic_inc(&e->epoch->epoch_size);
23691 + atomic_inc_unchecked(&e->epoch->epoch_size);
23692 atomic_inc(&e->epoch->active);
23693 spin_unlock(&mdev->epoch_lock);
23694
23695 @@ -3905,7 +3905,7 @@ static void drbd_disconnect(struct drbd_
23696 D_ASSERT(list_empty(&mdev->done_ee));
23697
23698 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23699 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23700 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23701 D_ASSERT(list_empty(&mdev->current_epoch->list));
23702 }
23703
23704 diff -urNp linux-2.6.39.4/drivers/block/nbd.c linux-2.6.39.4/drivers/block/nbd.c
23705 --- linux-2.6.39.4/drivers/block/nbd.c 2011-06-25 12:55:22.000000000 -0400
23706 +++ linux-2.6.39.4/drivers/block/nbd.c 2011-08-05 19:44:36.000000000 -0400
23707 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23708 struct kvec iov;
23709 sigset_t blocked, oldset;
23710
23711 + pax_track_stack();
23712 +
23713 if (unlikely(!sock)) {
23714 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23715 lo->disk->disk_name, (send ? "send" : "recv"));
23716 @@ -571,6 +573,8 @@ static void do_nbd_request(struct reques
23717 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23718 unsigned int cmd, unsigned long arg)
23719 {
23720 + pax_track_stack();
23721 +
23722 switch (cmd) {
23723 case NBD_DISCONNECT: {
23724 struct request sreq;
23725 diff -urNp linux-2.6.39.4/drivers/char/agp/frontend.c linux-2.6.39.4/drivers/char/agp/frontend.c
23726 --- linux-2.6.39.4/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400
23727 +++ linux-2.6.39.4/drivers/char/agp/frontend.c 2011-08-05 19:44:36.000000000 -0400
23728 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23729 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23730 return -EFAULT;
23731
23732 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23733 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23734 return -EFAULT;
23735
23736 client = agp_find_client_by_pid(reserve.pid);
23737 diff -urNp linux-2.6.39.4/drivers/char/briq_panel.c linux-2.6.39.4/drivers/char/briq_panel.c
23738 --- linux-2.6.39.4/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400
23739 +++ linux-2.6.39.4/drivers/char/briq_panel.c 2011-08-05 19:44:36.000000000 -0400
23740 @@ -9,6 +9,7 @@
23741 #include <linux/types.h>
23742 #include <linux/errno.h>
23743 #include <linux/tty.h>
23744 +#include <linux/mutex.h>
23745 #include <linux/timer.h>
23746 #include <linux/kernel.h>
23747 #include <linux/wait.h>
23748 @@ -34,6 +35,7 @@ static int vfd_is_open;
23749 static unsigned char vfd[40];
23750 static int vfd_cursor;
23751 static unsigned char ledpb, led;
23752 +static DEFINE_MUTEX(vfd_mutex);
23753
23754 static void update_vfd(void)
23755 {
23756 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23757 if (!vfd_is_open)
23758 return -EBUSY;
23759
23760 + mutex_lock(&vfd_mutex);
23761 for (;;) {
23762 char c;
23763 if (!indx)
23764 break;
23765 - if (get_user(c, buf))
23766 + if (get_user(c, buf)) {
23767 + mutex_unlock(&vfd_mutex);
23768 return -EFAULT;
23769 + }
23770 if (esc) {
23771 set_led(c);
23772 esc = 0;
23773 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23774 buf++;
23775 }
23776 update_vfd();
23777 + mutex_unlock(&vfd_mutex);
23778
23779 return len;
23780 }
23781 diff -urNp linux-2.6.39.4/drivers/char/genrtc.c linux-2.6.39.4/drivers/char/genrtc.c
23782 --- linux-2.6.39.4/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400
23783 +++ linux-2.6.39.4/drivers/char/genrtc.c 2011-08-05 19:44:36.000000000 -0400
23784 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23785 switch (cmd) {
23786
23787 case RTC_PLL_GET:
23788 + memset(&pll, 0, sizeof(pll));
23789 if (get_rtc_pll(&pll))
23790 return -EINVAL;
23791 else
23792 diff -urNp linux-2.6.39.4/drivers/char/hpet.c linux-2.6.39.4/drivers/char/hpet.c
23793 --- linux-2.6.39.4/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400
23794 +++ linux-2.6.39.4/drivers/char/hpet.c 2011-08-05 19:44:36.000000000 -0400
23795 @@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
23796 }
23797
23798 static int
23799 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23800 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23801 struct hpet_info *info)
23802 {
23803 struct hpet_timer __iomem *timer;
23804 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c
23805 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400
23806 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-05 20:34:06.000000000 -0400
23807 @@ -414,7 +414,7 @@ struct ipmi_smi {
23808 struct proc_dir_entry *proc_dir;
23809 char proc_dir_name[10];
23810
23811 - atomic_t stats[IPMI_NUM_STATS];
23812 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23813
23814 /*
23815 * run_to_completion duplicate of smb_info, smi_info
23816 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23817
23818
23819 #define ipmi_inc_stat(intf, stat) \
23820 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23821 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23822 #define ipmi_get_stat(intf, stat) \
23823 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23824 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23825
23826 static int is_lan_addr(struct ipmi_addr *addr)
23827 {
23828 @@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23829 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23830 init_waitqueue_head(&intf->waitq);
23831 for (i = 0; i < IPMI_NUM_STATS; i++)
23832 - atomic_set(&intf->stats[i], 0);
23833 + atomic_set_unchecked(&intf->stats[i], 0);
23834
23835 intf->proc_dir = NULL;
23836
23837 @@ -4196,6 +4196,8 @@ static void send_panic_events(char *str)
23838 struct ipmi_smi_msg smi_msg;
23839 struct ipmi_recv_msg recv_msg;
23840
23841 + pax_track_stack();
23842 +
23843 si = (struct ipmi_system_interface_addr *) &addr;
23844 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23845 si->channel = IPMI_BMC_CHANNEL;
23846 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c
23847 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400
23848 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-05 19:44:36.000000000 -0400
23849 @@ -276,7 +276,7 @@ struct smi_info {
23850 unsigned char slave_addr;
23851
23852 /* Counters and things for the proc filesystem. */
23853 - atomic_t stats[SI_NUM_STATS];
23854 + atomic_unchecked_t stats[SI_NUM_STATS];
23855
23856 struct task_struct *thread;
23857
23858 @@ -285,9 +285,9 @@ struct smi_info {
23859 };
23860
23861 #define smi_inc_stat(smi, stat) \
23862 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23863 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23864 #define smi_get_stat(smi, stat) \
23865 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23866 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23867
23868 #define SI_MAX_PARMS 4
23869
23870 @@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info
23871 atomic_set(&new_smi->req_events, 0);
23872 new_smi->run_to_completion = 0;
23873 for (i = 0; i < SI_NUM_STATS; i++)
23874 - atomic_set(&new_smi->stats[i], 0);
23875 + atomic_set_unchecked(&new_smi->stats[i], 0);
23876
23877 new_smi->interrupt_disabled = 1;
23878 atomic_set(&new_smi->stop_operation, 0);
23879 diff -urNp linux-2.6.39.4/drivers/char/Kconfig linux-2.6.39.4/drivers/char/Kconfig
23880 --- linux-2.6.39.4/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400
23881 +++ linux-2.6.39.4/drivers/char/Kconfig 2011-08-05 19:44:36.000000000 -0400
23882 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23883
23884 config DEVKMEM
23885 bool "/dev/kmem virtual device support"
23886 - default y
23887 + default n
23888 + depends on !GRKERNSEC_KMEM
23889 help
23890 Say Y here if you want to support the /dev/kmem device. The
23891 /dev/kmem device is rarely used, but can be used for certain
23892 @@ -596,6 +597,7 @@ config DEVPORT
23893 bool
23894 depends on !M68K
23895 depends on ISA || PCI
23896 + depends on !GRKERNSEC_KMEM
23897 default y
23898
23899 source "drivers/s390/char/Kconfig"
23900 diff -urNp linux-2.6.39.4/drivers/char/mem.c linux-2.6.39.4/drivers/char/mem.c
23901 --- linux-2.6.39.4/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400
23902 +++ linux-2.6.39.4/drivers/char/mem.c 2011-08-05 19:44:36.000000000 -0400
23903 @@ -18,6 +18,7 @@
23904 #include <linux/raw.h>
23905 #include <linux/tty.h>
23906 #include <linux/capability.h>
23907 +#include <linux/security.h>
23908 #include <linux/ptrace.h>
23909 #include <linux/device.h>
23910 #include <linux/highmem.h>
23911 @@ -34,6 +35,10 @@
23912 # include <linux/efi.h>
23913 #endif
23914
23915 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23916 +extern struct file_operations grsec_fops;
23917 +#endif
23918 +
23919 static inline unsigned long size_inside_page(unsigned long start,
23920 unsigned long size)
23921 {
23922 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23923
23924 while (cursor < to) {
23925 if (!devmem_is_allowed(pfn)) {
23926 +#ifdef CONFIG_GRKERNSEC_KMEM
23927 + gr_handle_mem_readwrite(from, to);
23928 +#else
23929 printk(KERN_INFO
23930 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23931 current->comm, from, to);
23932 +#endif
23933 return 0;
23934 }
23935 cursor += PAGE_SIZE;
23936 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23937 }
23938 return 1;
23939 }
23940 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23941 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23942 +{
23943 + return 0;
23944 +}
23945 #else
23946 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23947 {
23948 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23949
23950 while (count > 0) {
23951 unsigned long remaining;
23952 + char *temp;
23953
23954 sz = size_inside_page(p, count);
23955
23956 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23957 if (!ptr)
23958 return -EFAULT;
23959
23960 - remaining = copy_to_user(buf, ptr, sz);
23961 +#ifdef CONFIG_PAX_USERCOPY
23962 + temp = kmalloc(sz, GFP_KERNEL);
23963 + if (!temp) {
23964 + unxlate_dev_mem_ptr(p, ptr);
23965 + return -ENOMEM;
23966 + }
23967 + memcpy(temp, ptr, sz);
23968 +#else
23969 + temp = ptr;
23970 +#endif
23971 +
23972 + remaining = copy_to_user(buf, temp, sz);
23973 +
23974 +#ifdef CONFIG_PAX_USERCOPY
23975 + kfree(temp);
23976 +#endif
23977 +
23978 unxlate_dev_mem_ptr(p, ptr);
23979 if (remaining)
23980 return -EFAULT;
23981 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23982 size_t count, loff_t *ppos)
23983 {
23984 unsigned long p = *ppos;
23985 - ssize_t low_count, read, sz;
23986 + ssize_t low_count, read, sz, err = 0;
23987 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23988 - int err = 0;
23989
23990 read = 0;
23991 if (p < (unsigned long) high_memory) {
23992 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23993 }
23994 #endif
23995 while (low_count > 0) {
23996 + char *temp;
23997 +
23998 sz = size_inside_page(p, low_count);
23999
24000 /*
24001 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
24002 */
24003 kbuf = xlate_dev_kmem_ptr((char *)p);
24004
24005 - if (copy_to_user(buf, kbuf, sz))
24006 +#ifdef CONFIG_PAX_USERCOPY
24007 + temp = kmalloc(sz, GFP_KERNEL);
24008 + if (!temp)
24009 + return -ENOMEM;
24010 + memcpy(temp, kbuf, sz);
24011 +#else
24012 + temp = kbuf;
24013 +#endif
24014 +
24015 + err = copy_to_user(buf, temp, sz);
24016 +
24017 +#ifdef CONFIG_PAX_USERCOPY
24018 + kfree(temp);
24019 +#endif
24020 +
24021 + if (err)
24022 return -EFAULT;
24023 buf += sz;
24024 p += sz;
24025 @@ -854,6 +901,9 @@ static const struct memdev {
24026 #ifdef CONFIG_CRASH_DUMP
24027 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24028 #endif
24029 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24030 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24031 +#endif
24032 };
24033
24034 static int memory_open(struct inode *inode, struct file *filp)
24035 diff -urNp linux-2.6.39.4/drivers/char/nvram.c linux-2.6.39.4/drivers/char/nvram.c
24036 --- linux-2.6.39.4/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400
24037 +++ linux-2.6.39.4/drivers/char/nvram.c 2011-08-05 19:44:36.000000000 -0400
24038 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24039
24040 spin_unlock_irq(&rtc_lock);
24041
24042 - if (copy_to_user(buf, contents, tmp - contents))
24043 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24044 return -EFAULT;
24045
24046 *ppos = i;
24047 diff -urNp linux-2.6.39.4/drivers/char/random.c linux-2.6.39.4/drivers/char/random.c
24048 --- linux-2.6.39.4/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400
24049 +++ linux-2.6.39.4/drivers/char/random.c 2011-08-05 19:44:36.000000000 -0400
24050 @@ -261,8 +261,13 @@
24051 /*
24052 * Configuration information
24053 */
24054 +#ifdef CONFIG_GRKERNSEC_RANDNET
24055 +#define INPUT_POOL_WORDS 512
24056 +#define OUTPUT_POOL_WORDS 128
24057 +#else
24058 #define INPUT_POOL_WORDS 128
24059 #define OUTPUT_POOL_WORDS 32
24060 +#endif
24061 #define SEC_XFER_SIZE 512
24062 #define EXTRACT_SIZE 10
24063
24064 @@ -300,10 +305,17 @@ static struct poolinfo {
24065 int poolwords;
24066 int tap1, tap2, tap3, tap4, tap5;
24067 } poolinfo_table[] = {
24068 +#ifdef CONFIG_GRKERNSEC_RANDNET
24069 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24070 + { 512, 411, 308, 208, 104, 1 },
24071 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24072 + { 128, 103, 76, 51, 25, 1 },
24073 +#else
24074 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24075 { 128, 103, 76, 51, 25, 1 },
24076 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24077 { 32, 26, 20, 14, 7, 1 },
24078 +#endif
24079 #if 0
24080 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24081 { 2048, 1638, 1231, 819, 411, 1 },
24082 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24083
24084 extract_buf(r, tmp);
24085 i = min_t(int, nbytes, EXTRACT_SIZE);
24086 - if (copy_to_user(buf, tmp, i)) {
24087 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24088 ret = -EFAULT;
24089 break;
24090 }
24091 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24092 #include <linux/sysctl.h>
24093
24094 static int min_read_thresh = 8, min_write_thresh;
24095 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
24096 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24097 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24098 static char sysctl_bootid[16];
24099
24100 diff -urNp linux-2.6.39.4/drivers/char/sonypi.c linux-2.6.39.4/drivers/char/sonypi.c
24101 --- linux-2.6.39.4/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400
24102 +++ linux-2.6.39.4/drivers/char/sonypi.c 2011-08-05 19:44:36.000000000 -0400
24103 @@ -55,6 +55,7 @@
24104 #include <asm/uaccess.h>
24105 #include <asm/io.h>
24106 #include <asm/system.h>
24107 +#include <asm/local.h>
24108
24109 #include <linux/sonypi.h>
24110
24111 @@ -491,7 +492,7 @@ static struct sonypi_device {
24112 spinlock_t fifo_lock;
24113 wait_queue_head_t fifo_proc_list;
24114 struct fasync_struct *fifo_async;
24115 - int open_count;
24116 + local_t open_count;
24117 int model;
24118 struct input_dev *input_jog_dev;
24119 struct input_dev *input_key_dev;
24120 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24121 static int sonypi_misc_release(struct inode *inode, struct file *file)
24122 {
24123 mutex_lock(&sonypi_device.lock);
24124 - sonypi_device.open_count--;
24125 + local_dec(&sonypi_device.open_count);
24126 mutex_unlock(&sonypi_device.lock);
24127 return 0;
24128 }
24129 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24130 {
24131 mutex_lock(&sonypi_device.lock);
24132 /* Flush input queue on first open */
24133 - if (!sonypi_device.open_count)
24134 + if (!local_read(&sonypi_device.open_count))
24135 kfifo_reset(&sonypi_device.fifo);
24136 - sonypi_device.open_count++;
24137 + local_inc(&sonypi_device.open_count);
24138 mutex_unlock(&sonypi_device.lock);
24139
24140 return 0;
24141 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm_bios.c linux-2.6.39.4/drivers/char/tpm/tpm_bios.c
24142 --- linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400
24143 +++ linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-08-05 19:44:36.000000000 -0400
24144 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24145 event = addr;
24146
24147 if ((event->event_type == 0 && event->event_size == 0) ||
24148 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24149 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24150 return NULL;
24151
24152 return addr;
24153 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24154 return NULL;
24155
24156 if ((event->event_type == 0 && event->event_size == 0) ||
24157 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24158 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24159 return NULL;
24160
24161 (*pos)++;
24162 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24163 int i;
24164
24165 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24166 - seq_putc(m, data[i]);
24167 + if (!seq_putc(m, data[i]))
24168 + return -EFAULT;
24169
24170 return 0;
24171 }
24172 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24173 log->bios_event_log_end = log->bios_event_log + len;
24174
24175 virt = acpi_os_map_memory(start, len);
24176 + if (!virt) {
24177 + kfree(log->bios_event_log);
24178 + log->bios_event_log = NULL;
24179 + return -EFAULT;
24180 + }
24181
24182 memcpy(log->bios_event_log, virt, len);
24183
24184 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm.c linux-2.6.39.4/drivers/char/tpm/tpm.c
24185 --- linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400
24186 +++ linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-08-05 19:44:36.000000000 -0400
24187 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24188 chip->vendor.req_complete_val)
24189 goto out_recv;
24190
24191 - if ((status == chip->vendor.req_canceled)) {
24192 + if (status == chip->vendor.req_canceled) {
24193 dev_err(chip->dev, "Operation Canceled\n");
24194 rc = -ECANCELED;
24195 goto out;
24196 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24197
24198 struct tpm_chip *chip = dev_get_drvdata(dev);
24199
24200 + pax_track_stack();
24201 +
24202 tpm_cmd.header.in = tpm_readpubek_header;
24203 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24204 "attempting to read the PUBEK");
24205 diff -urNp linux-2.6.39.4/drivers/crypto/hifn_795x.c linux-2.6.39.4/drivers/crypto/hifn_795x.c
24206 --- linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400
24207 +++ linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-08-05 19:44:36.000000000 -0400
24208 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24209 0xCA, 0x34, 0x2B, 0x2E};
24210 struct scatterlist sg;
24211
24212 + pax_track_stack();
24213 +
24214 memset(src, 0, sizeof(src));
24215 memset(ctx.key, 0, sizeof(ctx.key));
24216
24217 diff -urNp linux-2.6.39.4/drivers/crypto/padlock-aes.c linux-2.6.39.4/drivers/crypto/padlock-aes.c
24218 --- linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400
24219 +++ linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-08-05 19:44:36.000000000 -0400
24220 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24221 struct crypto_aes_ctx gen_aes;
24222 int cpu;
24223
24224 + pax_track_stack();
24225 +
24226 if (key_len % 8) {
24227 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24228 return -EINVAL;
24229 diff -urNp linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c
24230 --- linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400
24231 +++ linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-08-05 19:44:36.000000000 -0400
24232 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24233 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24234 static int edac_pci_poll_msec = 1000; /* one second workq period */
24235
24236 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24237 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24238 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24239 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24240
24241 static struct kobject *edac_pci_top_main_kobj;
24242 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24243 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24244 edac_printk(KERN_CRIT, EDAC_PCI,
24245 "Signaled System Error on %s\n",
24246 pci_name(dev));
24247 - atomic_inc(&pci_nonparity_count);
24248 + atomic_inc_unchecked(&pci_nonparity_count);
24249 }
24250
24251 if (status & (PCI_STATUS_PARITY)) {
24252 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24253 "Master Data Parity Error on %s\n",
24254 pci_name(dev));
24255
24256 - atomic_inc(&pci_parity_count);
24257 + atomic_inc_unchecked(&pci_parity_count);
24258 }
24259
24260 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24261 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24262 "Detected Parity Error on %s\n",
24263 pci_name(dev));
24264
24265 - atomic_inc(&pci_parity_count);
24266 + atomic_inc_unchecked(&pci_parity_count);
24267 }
24268 }
24269
24270 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24271 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24272 "Signaled System Error on %s\n",
24273 pci_name(dev));
24274 - atomic_inc(&pci_nonparity_count);
24275 + atomic_inc_unchecked(&pci_nonparity_count);
24276 }
24277
24278 if (status & (PCI_STATUS_PARITY)) {
24279 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24280 "Master Data Parity Error on "
24281 "%s\n", pci_name(dev));
24282
24283 - atomic_inc(&pci_parity_count);
24284 + atomic_inc_unchecked(&pci_parity_count);
24285 }
24286
24287 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24288 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24289 "Detected Parity Error on %s\n",
24290 pci_name(dev));
24291
24292 - atomic_inc(&pci_parity_count);
24293 + atomic_inc_unchecked(&pci_parity_count);
24294 }
24295 }
24296 }
24297 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24298 if (!check_pci_errors)
24299 return;
24300
24301 - before_count = atomic_read(&pci_parity_count);
24302 + before_count = atomic_read_unchecked(&pci_parity_count);
24303
24304 /* scan all PCI devices looking for a Parity Error on devices and
24305 * bridges.
24306 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24307 /* Only if operator has selected panic on PCI Error */
24308 if (edac_pci_get_panic_on_pe()) {
24309 /* If the count is different 'after' from 'before' */
24310 - if (before_count != atomic_read(&pci_parity_count))
24311 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24312 panic("EDAC: PCI Parity Error");
24313 }
24314 }
24315 diff -urNp linux-2.6.39.4/drivers/edac/i7core_edac.c linux-2.6.39.4/drivers/edac/i7core_edac.c
24316 --- linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-05-19 00:06:34.000000000 -0400
24317 +++ linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-08-05 19:44:36.000000000 -0400
24318 @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24319 char *type, *optype, *err, *msg;
24320 unsigned long error = m->status & 0x1ff0000l;
24321 u32 optypenum = (m->status >> 4) & 0x07;
24322 - u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24323 + u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24324 u32 dimm = (m->misc >> 16) & 0x3;
24325 u32 channel = (m->misc >> 18) & 0x3;
24326 u32 syndrome = m->misc >> 32;
24327 diff -urNp linux-2.6.39.4/drivers/edac/mce_amd.h linux-2.6.39.4/drivers/edac/mce_amd.h
24328 --- linux-2.6.39.4/drivers/edac/mce_amd.h 2011-05-19 00:06:34.000000000 -0400
24329 +++ linux-2.6.39.4/drivers/edac/mce_amd.h 2011-08-05 20:34:06.000000000 -0400
24330 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24331 bool (*dc_mce)(u16, u8);
24332 bool (*ic_mce)(u16, u8);
24333 bool (*nb_mce)(u16, u8);
24334 -};
24335 +} __no_const;
24336
24337 void amd_report_gart_errors(bool);
24338 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24339 diff -urNp linux-2.6.39.4/drivers/firewire/core-card.c linux-2.6.39.4/drivers/firewire/core-card.c
24340 --- linux-2.6.39.4/drivers/firewire/core-card.c 2011-05-19 00:06:34.000000000 -0400
24341 +++ linux-2.6.39.4/drivers/firewire/core-card.c 2011-08-05 20:34:06.000000000 -0400
24342 @@ -652,7 +652,7 @@ void fw_card_release(struct kref *kref)
24343
24344 void fw_core_remove_card(struct fw_card *card)
24345 {
24346 - struct fw_card_driver dummy_driver = dummy_driver_template;
24347 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24348
24349 card->driver->update_phy_reg(card, 4,
24350 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24351 diff -urNp linux-2.6.39.4/drivers/firewire/core-cdev.c linux-2.6.39.4/drivers/firewire/core-cdev.c
24352 --- linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400
24353 +++ linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-08-05 19:44:36.000000000 -0400
24354 @@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie
24355 int ret;
24356
24357 if ((request->channels == 0 && request->bandwidth == 0) ||
24358 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24359 - request->bandwidth < 0)
24360 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24361 return -EINVAL;
24362
24363 r = kmalloc(sizeof(*r), GFP_KERNEL);
24364 diff -urNp linux-2.6.39.4/drivers/firewire/core.h linux-2.6.39.4/drivers/firewire/core.h
24365 --- linux-2.6.39.4/drivers/firewire/core.h 2011-05-19 00:06:34.000000000 -0400
24366 +++ linux-2.6.39.4/drivers/firewire/core.h 2011-08-05 20:34:06.000000000 -0400
24367 @@ -99,6 +99,7 @@ struct fw_card_driver {
24368
24369 int (*stop_iso)(struct fw_iso_context *ctx);
24370 };
24371 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24372
24373 void fw_card_initialize(struct fw_card *card,
24374 const struct fw_card_driver *driver, struct device *device);
24375 diff -urNp linux-2.6.39.4/drivers/firewire/core-transaction.c linux-2.6.39.4/drivers/firewire/core-transaction.c
24376 --- linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400
24377 +++ linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-08-05 19:44:36.000000000 -0400
24378 @@ -36,6 +36,7 @@
24379 #include <linux/string.h>
24380 #include <linux/timer.h>
24381 #include <linux/types.h>
24382 +#include <linux/sched.h>
24383
24384 #include <asm/byteorder.h>
24385
24386 @@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c
24387 struct transaction_callback_data d;
24388 struct fw_transaction t;
24389
24390 + pax_track_stack();
24391 +
24392 init_timer_on_stack(&t.split_timeout_timer);
24393 init_completion(&d.done);
24394 d.payload = payload;
24395 diff -urNp linux-2.6.39.4/drivers/firmware/dmi_scan.c linux-2.6.39.4/drivers/firmware/dmi_scan.c
24396 --- linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400
24397 +++ linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-08-05 19:44:36.000000000 -0400
24398 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24399 }
24400 }
24401 else {
24402 - /*
24403 - * no iounmap() for that ioremap(); it would be a no-op, but
24404 - * it's so early in setup that sucker gets confused into doing
24405 - * what it shouldn't if we actually call it.
24406 - */
24407 p = dmi_ioremap(0xF0000, 0x10000);
24408 if (p == NULL)
24409 goto error;
24410 diff -urNp linux-2.6.39.4/drivers/gpio/vr41xx_giu.c linux-2.6.39.4/drivers/gpio/vr41xx_giu.c
24411 --- linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400
24412 +++ linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-08-05 19:44:36.000000000 -0400
24413 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24414 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24415 maskl, pendl, maskh, pendh);
24416
24417 - atomic_inc(&irq_err_count);
24418 + atomic_inc_unchecked(&irq_err_count);
24419
24420 return -EINVAL;
24421 }
24422 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c
24423 --- linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400
24424 +++ linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-05 19:44:36.000000000 -0400
24425 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24426 struct drm_crtc *tmp;
24427 int crtc_mask = 1;
24428
24429 - WARN(!crtc, "checking null crtc?\n");
24430 + BUG_ON(!crtc);
24431
24432 dev = crtc->dev;
24433
24434 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24435 struct drm_encoder *encoder;
24436 bool ret = true;
24437
24438 + pax_track_stack();
24439 +
24440 crtc->enabled = drm_helper_crtc_in_use(crtc);
24441 if (!crtc->enabled)
24442 return true;
24443 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_drv.c linux-2.6.39.4/drivers/gpu/drm/drm_drv.c
24444 --- linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400
24445 +++ linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-08-05 19:44:36.000000000 -0400
24446 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24447
24448 dev = file_priv->minor->dev;
24449 atomic_inc(&dev->ioctl_count);
24450 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24451 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24452 ++file_priv->ioctl_count;
24453
24454 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24455 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_fops.c linux-2.6.39.4/drivers/gpu/drm/drm_fops.c
24456 --- linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400
24457 +++ linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-08-05 19:44:36.000000000 -0400
24458 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24459 }
24460
24461 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24462 - atomic_set(&dev->counts[i], 0);
24463 + atomic_set_unchecked(&dev->counts[i], 0);
24464
24465 dev->sigdata.lock = NULL;
24466
24467 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24468
24469 retcode = drm_open_helper(inode, filp, dev);
24470 if (!retcode) {
24471 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24472 - if (!dev->open_count++)
24473 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24474 + if (local_inc_return(&dev->open_count) == 1)
24475 retcode = drm_setup(dev);
24476 }
24477 if (!retcode) {
24478 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24479
24480 mutex_lock(&drm_global_mutex);
24481
24482 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24483 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24484
24485 if (dev->driver->preclose)
24486 dev->driver->preclose(dev, file_priv);
24487 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24488 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24489 task_pid_nr(current),
24490 (long)old_encode_dev(file_priv->minor->device),
24491 - dev->open_count);
24492 + local_read(&dev->open_count));
24493
24494 /* if the master has gone away we can't do anything with the lock */
24495 if (file_priv->minor->master)
24496 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24497 * End inline drm_release
24498 */
24499
24500 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24501 - if (!--dev->open_count) {
24502 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24503 + if (local_dec_and_test(&dev->open_count)) {
24504 if (atomic_read(&dev->ioctl_count)) {
24505 DRM_ERROR("Device busy: %d\n",
24506 atomic_read(&dev->ioctl_count));
24507 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_global.c linux-2.6.39.4/drivers/gpu/drm/drm_global.c
24508 --- linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400
24509 +++ linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-08-05 19:44:36.000000000 -0400
24510 @@ -36,7 +36,7 @@
24511 struct drm_global_item {
24512 struct mutex mutex;
24513 void *object;
24514 - int refcount;
24515 + atomic_t refcount;
24516 };
24517
24518 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24519 @@ -49,7 +49,7 @@ void drm_global_init(void)
24520 struct drm_global_item *item = &glob[i];
24521 mutex_init(&item->mutex);
24522 item->object = NULL;
24523 - item->refcount = 0;
24524 + atomic_set(&item->refcount, 0);
24525 }
24526 }
24527
24528 @@ -59,7 +59,7 @@ void drm_global_release(void)
24529 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24530 struct drm_global_item *item = &glob[i];
24531 BUG_ON(item->object != NULL);
24532 - BUG_ON(item->refcount != 0);
24533 + BUG_ON(atomic_read(&item->refcount) != 0);
24534 }
24535 }
24536
24537 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24538 void *object;
24539
24540 mutex_lock(&item->mutex);
24541 - if (item->refcount == 0) {
24542 + if (atomic_read(&item->refcount) == 0) {
24543 item->object = kzalloc(ref->size, GFP_KERNEL);
24544 if (unlikely(item->object == NULL)) {
24545 ret = -ENOMEM;
24546 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24547 goto out_err;
24548
24549 }
24550 - ++item->refcount;
24551 + atomic_inc(&item->refcount);
24552 ref->object = item->object;
24553 object = item->object;
24554 mutex_unlock(&item->mutex);
24555 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24556 struct drm_global_item *item = &glob[ref->global_type];
24557
24558 mutex_lock(&item->mutex);
24559 - BUG_ON(item->refcount == 0);
24560 + BUG_ON(atomic_read(&item->refcount) == 0);
24561 BUG_ON(ref->object != item->object);
24562 - if (--item->refcount == 0) {
24563 + if (atomic_dec_and_test(&item->refcount)) {
24564 ref->release(ref);
24565 item->object = NULL;
24566 }
24567 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_info.c linux-2.6.39.4/drivers/gpu/drm/drm_info.c
24568 --- linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400
24569 +++ linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-08-05 19:44:36.000000000 -0400
24570 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24571 struct drm_local_map *map;
24572 struct drm_map_list *r_list;
24573
24574 - /* Hardcoded from _DRM_FRAME_BUFFER,
24575 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24576 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24577 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24578 + static const char * const types[] = {
24579 + [_DRM_FRAME_BUFFER] = "FB",
24580 + [_DRM_REGISTERS] = "REG",
24581 + [_DRM_SHM] = "SHM",
24582 + [_DRM_AGP] = "AGP",
24583 + [_DRM_SCATTER_GATHER] = "SG",
24584 + [_DRM_CONSISTENT] = "PCI",
24585 + [_DRM_GEM] = "GEM" };
24586 const char *type;
24587 int i;
24588
24589 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24590 map = r_list->map;
24591 if (!map)
24592 continue;
24593 - if (map->type < 0 || map->type > 5)
24594 + if (map->type >= ARRAY_SIZE(types))
24595 type = "??";
24596 else
24597 type = types[map->type];
24598 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24599 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24600 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24601 vma->vm_flags & VM_IO ? 'i' : '-',
24602 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24603 + 0);
24604 +#else
24605 vma->vm_pgoff);
24606 +#endif
24607
24608 #if defined(__i386__)
24609 pgprot = pgprot_val(vma->vm_page_prot);
24610 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c
24611 --- linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400
24612 +++ linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-08-05 19:44:36.000000000 -0400
24613 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24614 stats->data[i].value =
24615 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24616 else
24617 - stats->data[i].value = atomic_read(&dev->counts[i]);
24618 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24619 stats->data[i].type = dev->types[i];
24620 }
24621
24622 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_lock.c linux-2.6.39.4/drivers/gpu/drm/drm_lock.c
24623 --- linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400
24624 +++ linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-08-05 19:44:36.000000000 -0400
24625 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24626 if (drm_lock_take(&master->lock, lock->context)) {
24627 master->lock.file_priv = file_priv;
24628 master->lock.lock_time = jiffies;
24629 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24630 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24631 break; /* Got lock */
24632 }
24633
24634 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24635 return -EINVAL;
24636 }
24637
24638 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24639 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24640
24641 if (drm_lock_free(&master->lock, lock->context)) {
24642 /* FIXME: Should really bail out here. */
24643 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c
24644 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400
24645 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-05 19:44:36.000000000 -0400
24646 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24647 dma->buflist[vertex->idx],
24648 vertex->discard, vertex->used);
24649
24650 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24651 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24652 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24653 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24654 sarea_priv->last_enqueue = dev_priv->counter - 1;
24655 sarea_priv->last_dispatch = (int)hw_status[5];
24656
24657 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24658 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24659 mc->last_render);
24660
24661 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24662 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24663 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24664 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24665 sarea_priv->last_enqueue = dev_priv->counter - 1;
24666 sarea_priv->last_dispatch = (int)hw_status[5];
24667
24668 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h
24669 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400
24670 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-05 19:44:36.000000000 -0400
24671 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24672 int page_flipping;
24673
24674 wait_queue_head_t irq_queue;
24675 - atomic_t irq_received;
24676 - atomic_t irq_emitted;
24677 + atomic_unchecked_t irq_received;
24678 + atomic_unchecked_t irq_emitted;
24679
24680 int front_offset;
24681 } drm_i810_private_t;
24682 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c
24683 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400
24684 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-05 19:44:36.000000000 -0400
24685 @@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se
24686 I915_READ(GTIMR));
24687 }
24688 seq_printf(m, "Interrupts received: %d\n",
24689 - atomic_read(&dev_priv->irq_received));
24690 + atomic_read_unchecked(&dev_priv->irq_received));
24691 for (i = 0; i < I915_NUM_RINGS; i++) {
24692 if (IS_GEN6(dev)) {
24693 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24694 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c
24695 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400
24696 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-05 19:44:36.000000000 -0400
24697 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
24698 bool can_switch;
24699
24700 spin_lock(&dev->count_lock);
24701 - can_switch = (dev->open_count == 0);
24702 + can_switch = (local_read(&dev->open_count) == 0);
24703 spin_unlock(&dev->count_lock);
24704 return can_switch;
24705 }
24706 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h
24707 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400
24708 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:34:06.000000000 -0400
24709 @@ -209,7 +209,7 @@ struct drm_i915_display_funcs {
24710 /* display clock increase/decrease */
24711 /* pll clock increase/decrease */
24712 /* clock gating init */
24713 -};
24714 +} __no_const;
24715
24716 struct intel_device_info {
24717 u8 gen;
24718 @@ -287,7 +287,7 @@ typedef struct drm_i915_private {
24719 int current_page;
24720 int page_flipping;
24721
24722 - atomic_t irq_received;
24723 + atomic_unchecked_t irq_received;
24724
24725 /* protects the irq masks */
24726 spinlock_t irq_lock;
24727 @@ -848,7 +848,7 @@ struct drm_i915_gem_object {
24728 * will be page flipped away on the next vblank. When it
24729 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24730 */
24731 - atomic_t pending_flip;
24732 + atomic_unchecked_t pending_flip;
24733 };
24734
24735 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24736 @@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_
24737 extern void intel_teardown_gmbus(struct drm_device *dev);
24738 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24739 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24740 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24741 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24742 {
24743 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24744 }
24745 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24746 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400
24747 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-05 19:44:36.000000000 -0400
24748 @@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct
24749 i915_gem_release_mmap(obj);
24750
24751 if (obj->base.pending_write_domain)
24752 - cd->flips |= atomic_read(&obj->pending_flip);
24753 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24754
24755 /* The actual obj->write_domain will be updated with
24756 * pending_write_domain after we emit the accumulated flush for all
24757 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c
24758 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:18:51.000000000 -0400
24759 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-05 19:44:36.000000000 -0400
24760 @@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
24761 int ret = IRQ_NONE, pipe;
24762 bool blc_event = false;
24763
24764 - atomic_inc(&dev_priv->irq_received);
24765 + atomic_inc_unchecked(&dev_priv->irq_received);
24766
24767 if (HAS_PCH_SPLIT(dev))
24768 return ironlake_irq_handler(dev);
24769 @@ -1666,7 +1666,7 @@ void i915_driver_irq_preinstall(struct d
24770 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24771 int pipe;
24772
24773 - atomic_set(&dev_priv->irq_received, 0);
24774 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24775
24776 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24777 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24778 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c
24779 --- linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400
24780 +++ linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-08-05 19:44:36.000000000 -0400
24781 @@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24782
24783 wait_event(dev_priv->pending_flip_queue,
24784 atomic_read(&dev_priv->mm.wedged) ||
24785 - atomic_read(&obj->pending_flip) == 0);
24786 + atomic_read_unchecked(&obj->pending_flip) == 0);
24787
24788 /* Big Hammer, we also need to ensure that any pending
24789 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24790 @@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_
24791 obj = to_intel_framebuffer(crtc->fb)->obj;
24792 dev_priv = crtc->dev->dev_private;
24793 wait_event(dev_priv->pending_flip_queue,
24794 - atomic_read(&obj->pending_flip) == 0);
24795 + atomic_read_unchecked(&obj->pending_flip) == 0);
24796 }
24797
24798 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24799 @@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st
24800
24801 atomic_clear_mask(1 << intel_crtc->plane,
24802 &obj->pending_flip.counter);
24803 - if (atomic_read(&obj->pending_flip) == 0)
24804 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24805 wake_up(&dev_priv->pending_flip_queue);
24806
24807 schedule_work(&work->work);
24808 @@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d
24809 /* Block clients from rendering to the new back buffer until
24810 * the flip occurs and the object is no longer visible.
24811 */
24812 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24813 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24814
24815 switch (INTEL_INFO(dev)->gen) {
24816 case 2:
24817 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h
24818 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400
24819 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-05 19:44:36.000000000 -0400
24820 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24821 u32 clear_cmd;
24822 u32 maccess;
24823
24824 - atomic_t vbl_received; /**< Number of vblanks received. */
24825 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24826 wait_queue_head_t fence_queue;
24827 - atomic_t last_fence_retired;
24828 + atomic_unchecked_t last_fence_retired;
24829 u32 next_fence_to_post;
24830
24831 unsigned int fb_cpp;
24832 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c
24833 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400
24834 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-05 19:44:36.000000000 -0400
24835 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24836 if (crtc != 0)
24837 return 0;
24838
24839 - return atomic_read(&dev_priv->vbl_received);
24840 + return atomic_read_unchecked(&dev_priv->vbl_received);
24841 }
24842
24843
24844 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24845 /* VBLANK interrupt */
24846 if (status & MGA_VLINEPEN) {
24847 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24848 - atomic_inc(&dev_priv->vbl_received);
24849 + atomic_inc_unchecked(&dev_priv->vbl_received);
24850 drm_handle_vblank(dev, 0);
24851 handled = 1;
24852 }
24853 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24854 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24855 MGA_WRITE(MGA_PRIMEND, prim_end);
24856
24857 - atomic_inc(&dev_priv->last_fence_retired);
24858 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24859 DRM_WAKEUP(&dev_priv->fence_queue);
24860 handled = 1;
24861 }
24862 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24863 * using fences.
24864 */
24865 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24866 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24867 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24868 - *sequence) <= (1 << 23)));
24869
24870 *sequence = cur_fence;
24871 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24872 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400
24873 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-05 20:34:06.000000000 -0400
24874 @@ -228,7 +228,7 @@ struct nouveau_channel {
24875 struct list_head pending;
24876 uint32_t sequence;
24877 uint32_t sequence_ack;
24878 - atomic_t last_sequence_irq;
24879 + atomic_unchecked_t last_sequence_irq;
24880 } fence;
24881
24882 /* DMA push buffer */
24883 @@ -317,13 +317,13 @@ struct nouveau_instmem_engine {
24884 struct nouveau_mc_engine {
24885 int (*init)(struct drm_device *dev);
24886 void (*takedown)(struct drm_device *dev);
24887 -};
24888 +} __no_const;
24889
24890 struct nouveau_timer_engine {
24891 int (*init)(struct drm_device *dev);
24892 void (*takedown)(struct drm_device *dev);
24893 uint64_t (*read)(struct drm_device *dev);
24894 -};
24895 +} __no_const;
24896
24897 struct nouveau_fb_engine {
24898 int num_tiles;
24899 @@ -516,7 +516,7 @@ struct nouveau_vram_engine {
24900 void (*put)(struct drm_device *, struct nouveau_mem **);
24901
24902 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24903 -};
24904 +} __no_const;
24905
24906 struct nouveau_engine {
24907 struct nouveau_instmem_engine instmem;
24908 @@ -662,7 +662,7 @@ struct drm_nouveau_private {
24909 struct drm_global_reference mem_global_ref;
24910 struct ttm_bo_global_ref bo_global_ref;
24911 struct ttm_bo_device bdev;
24912 - atomic_t validate_sequence;
24913 + atomic_unchecked_t validate_sequence;
24914 } ttm;
24915
24916 struct {
24917 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24918 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400
24919 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-05 19:44:36.000000000 -0400
24920 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24921 if (USE_REFCNT(dev))
24922 sequence = nvchan_rd32(chan, 0x48);
24923 else
24924 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24925 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24926
24927 if (chan->fence.sequence_ack == sequence)
24928 goto out;
24929 @@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea
24930 out_initialised:
24931 INIT_LIST_HEAD(&chan->fence.pending);
24932 spin_lock_init(&chan->fence.lock);
24933 - atomic_set(&chan->fence.last_sequence_irq, 0);
24934 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24935 return 0;
24936 }
24937
24938 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24939 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400
24940 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-05 19:44:36.000000000 -0400
24941 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24942 int trycnt = 0;
24943 int ret, i;
24944
24945 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24946 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24947 retry:
24948 if (++trycnt > 100000) {
24949 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24950 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c
24951 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400
24952 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-05 19:44:36.000000000 -0400
24953 @@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc
24954 bool can_switch;
24955
24956 spin_lock(&dev->count_lock);
24957 - can_switch = (dev->open_count == 0);
24958 + can_switch = (local_read(&dev->open_count) == 0);
24959 spin_unlock(&dev->count_lock);
24960 return can_switch;
24961 }
24962 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c
24963 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400
24964 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-05 19:44:36.000000000 -0400
24965 @@ -552,7 +552,7 @@ static int
24966 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24967 u32 class, u32 mthd, u32 data)
24968 {
24969 - atomic_set(&chan->fence.last_sequence_irq, data);
24970 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24971 return 0;
24972 }
24973
24974 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c
24975 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400
24976 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-05 19:44:36.000000000 -0400
24977 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24978
24979 /* GH: Simple idle check.
24980 */
24981 - atomic_set(&dev_priv->idle_count, 0);
24982 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24983
24984 /* We don't support anything other than bus-mastering ring mode,
24985 * but the ring can be in either AGP or PCI space for the ring
24986 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h
24987 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400
24988 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-05 19:44:36.000000000 -0400
24989 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24990 int is_pci;
24991 unsigned long cce_buffers_offset;
24992
24993 - atomic_t idle_count;
24994 + atomic_unchecked_t idle_count;
24995
24996 int page_flipping;
24997 int current_page;
24998 u32 crtc_offset;
24999 u32 crtc_offset_cntl;
25000
25001 - atomic_t vbl_received;
25002 + atomic_unchecked_t vbl_received;
25003
25004 u32 color_fmt;
25005 unsigned int front_offset;
25006 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c
25007 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400
25008 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-05 19:44:36.000000000 -0400
25009 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
25010 if (crtc != 0)
25011 return 0;
25012
25013 - return atomic_read(&dev_priv->vbl_received);
25014 + return atomic_read_unchecked(&dev_priv->vbl_received);
25015 }
25016
25017 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
25018 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
25019 /* VBLANK interrupt */
25020 if (status & R128_CRTC_VBLANK_INT) {
25021 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
25022 - atomic_inc(&dev_priv->vbl_received);
25023 + atomic_inc_unchecked(&dev_priv->vbl_received);
25024 drm_handle_vblank(dev, 0);
25025 return IRQ_HANDLED;
25026 }
25027 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c
25028 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400
25029 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-08-05 19:44:36.000000000 -0400
25030 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25031
25032 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25033 {
25034 - if (atomic_read(&dev_priv->idle_count) == 0)
25035 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25036 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25037 else
25038 - atomic_set(&dev_priv->idle_count, 0);
25039 + atomic_set_unchecked(&dev_priv->idle_count, 0);
25040 }
25041
25042 #endif
25043 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c
25044 --- linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-05-19 00:06:34.000000000 -0400
25045 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-08-05 19:44:36.000000000 -0400
25046 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25047 char name[512];
25048 int i;
25049
25050 + pax_track_stack();
25051 +
25052 ctx->card = card;
25053 ctx->bios = bios;
25054
25055 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c
25056 --- linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400
25057 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-05 19:44:36.000000000 -0400
25058 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25059 regex_t mask_rex;
25060 regmatch_t match[4];
25061 char buf[1024];
25062 - size_t end;
25063 + long end;
25064 int len;
25065 int done = 0;
25066 int r;
25067 unsigned o;
25068 struct offset *offset;
25069 char last_reg_s[10];
25070 - int last_reg;
25071 + unsigned long last_reg;
25072
25073 if (regcomp
25074 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25075 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c
25076 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400
25077 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-05 19:44:36.000000000 -0400
25078 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25079 struct radeon_gpio_rec gpio;
25080 struct radeon_hpd hpd;
25081
25082 + pax_track_stack();
25083 +
25084 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25085 return false;
25086
25087 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c
25088 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 12:55:22.000000000 -0400
25089 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-05 19:44:36.000000000 -0400
25090 @@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch
25091 bool can_switch;
25092
25093 spin_lock(&dev->count_lock);
25094 - can_switch = (dev->open_count == 0);
25095 + can_switch = (local_read(&dev->open_count) == 0);
25096 spin_unlock(&dev->count_lock);
25097 return can_switch;
25098 }
25099 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c
25100 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:11:51.000000000 -0400
25101 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:12:20.000000000 -0400
25102 @@ -937,6 +937,8 @@ void radeon_compute_pll_legacy(struct ra
25103 uint32_t post_div;
25104 u32 pll_out_min, pll_out_max;
25105
25106 + pax_track_stack();
25107 +
25108 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25109 freq = freq * 1000;
25110
25111 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h
25112 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400
25113 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-05 19:44:36.000000000 -0400
25114 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25115
25116 /* SW interrupt */
25117 wait_queue_head_t swi_queue;
25118 - atomic_t swi_emitted;
25119 + atomic_unchecked_t swi_emitted;
25120 int vblank_crtc;
25121 uint32_t irq_enable_reg;
25122 uint32_t r500_disp_irq_reg;
25123 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c
25124 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400
25125 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-05 19:44:36.000000000 -0400
25126 @@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi
25127 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25128 return 0;
25129 }
25130 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25131 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25132 if (!rdev->cp.ready) {
25133 /* FIXME: cp is not running assume everythings is done right
25134 * away
25135 @@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade
25136 return r;
25137 }
25138 WREG32(rdev->fence_drv.scratch_reg, 0);
25139 - atomic_set(&rdev->fence_drv.seq, 0);
25140 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25141 INIT_LIST_HEAD(&rdev->fence_drv.created);
25142 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25143 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25144 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h
25145 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400
25146 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:34:06.000000000 -0400
25147 @@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d
25148 */
25149 struct radeon_fence_driver {
25150 uint32_t scratch_reg;
25151 - atomic_t seq;
25152 + atomic_unchecked_t seq;
25153 uint32_t last_seq;
25154 unsigned long last_jiffies;
25155 unsigned long last_timeout;
25156 @@ -958,7 +958,7 @@ struct radeon_asic {
25157 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25158 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25159 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25160 -};
25161 +} __no_const;
25162
25163 /*
25164 * Asic structures
25165 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25166 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400
25167 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-05 19:44:36.000000000 -0400
25168 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25169 request = compat_alloc_user_space(sizeof(*request));
25170 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25171 || __put_user(req32.param, &request->param)
25172 - || __put_user((void __user *)(unsigned long)req32.value,
25173 + || __put_user((unsigned long)req32.value,
25174 &request->value))
25175 return -EFAULT;
25176
25177 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c
25178 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400
25179 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-05 19:44:36.000000000 -0400
25180 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25181 unsigned int ret;
25182 RING_LOCALS;
25183
25184 - atomic_inc(&dev_priv->swi_emitted);
25185 - ret = atomic_read(&dev_priv->swi_emitted);
25186 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25187 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25188
25189 BEGIN_RING(4);
25190 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25191 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25192 drm_radeon_private_t *dev_priv =
25193 (drm_radeon_private_t *) dev->dev_private;
25194
25195 - atomic_set(&dev_priv->swi_emitted, 0);
25196 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25197 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25198
25199 dev->max_vblank_count = 0x001fffff;
25200 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c
25201 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400
25202 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-05 19:44:36.000000000 -0400
25203 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25204 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25205 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25206
25207 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25208 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25209 sarea_priv->nbox * sizeof(depth_boxes[0])))
25210 return -EFAULT;
25211
25212 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25213 {
25214 drm_radeon_private_t *dev_priv = dev->dev_private;
25215 drm_radeon_getparam_t *param = data;
25216 - int value;
25217 + int value = 0;
25218
25219 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25220
25221 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c
25222 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400
25223 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-05 20:34:06.000000000 -0400
25224 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25225 }
25226 if (unlikely(ttm_vm_ops == NULL)) {
25227 ttm_vm_ops = vma->vm_ops;
25228 - radeon_ttm_vm_ops = *ttm_vm_ops;
25229 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25230 + pax_open_kernel();
25231 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25232 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25233 + pax_close_kernel();
25234 }
25235 vma->vm_ops = &radeon_ttm_vm_ops;
25236 return 0;
25237 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c
25238 --- linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400
25239 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-08-05 19:44:36.000000000 -0400
25240 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25241 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25242 rdev->pm.sideport_bandwidth.full)
25243 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25244 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25245 + read_delay_latency.full = dfixed_const(800 * 1000);
25246 read_delay_latency.full = dfixed_div(read_delay_latency,
25247 rdev->pm.igp_sideport_mclk);
25248 + a.full = dfixed_const(370);
25249 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25250 } else {
25251 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25252 rdev->pm.k8_bandwidth.full)
25253 diff -urNp linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25254 --- linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400
25255 +++ linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-05 19:44:36.000000000 -0400
25256 @@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages
25257 */
25258 static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
25259 {
25260 - static atomic_t start_pool = ATOMIC_INIT(0);
25261 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25262 unsigned i;
25263 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25264 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25265 struct ttm_page_pool *pool;
25266
25267 pool_offset = pool_offset % NUM_POOLS;
25268 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h
25269 --- linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400
25270 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-08-05 19:44:36.000000000 -0400
25271 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25272 typedef uint32_t maskarray_t[5];
25273
25274 typedef struct drm_via_irq {
25275 - atomic_t irq_received;
25276 + atomic_unchecked_t irq_received;
25277 uint32_t pending_mask;
25278 uint32_t enable_mask;
25279 wait_queue_head_t irq_queue;
25280 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25281 struct timeval last_vblank;
25282 int last_vblank_valid;
25283 unsigned usec_per_vblank;
25284 - atomic_t vbl_received;
25285 + atomic_unchecked_t vbl_received;
25286 drm_via_state_t hc_state;
25287 char pci_buf[VIA_PCI_BUF_SIZE];
25288 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25289 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c
25290 --- linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-05-19 00:06:34.000000000 -0400
25291 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-08-05 19:44:36.000000000 -0400
25292 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25293 if (crtc != 0)
25294 return 0;
25295
25296 - return atomic_read(&dev_priv->vbl_received);
25297 + return atomic_read_unchecked(&dev_priv->vbl_received);
25298 }
25299
25300 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25301 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25302
25303 status = VIA_READ(VIA_REG_INTERRUPT);
25304 if (status & VIA_IRQ_VBLANK_PENDING) {
25305 - atomic_inc(&dev_priv->vbl_received);
25306 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25307 + atomic_inc_unchecked(&dev_priv->vbl_received);
25308 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25309 do_gettimeofday(&cur_vblank);
25310 if (dev_priv->last_vblank_valid) {
25311 dev_priv->usec_per_vblank =
25312 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25313 dev_priv->last_vblank = cur_vblank;
25314 dev_priv->last_vblank_valid = 1;
25315 }
25316 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25317 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25318 DRM_DEBUG("US per vblank is: %u\n",
25319 dev_priv->usec_per_vblank);
25320 }
25321 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25322
25323 for (i = 0; i < dev_priv->num_irqs; ++i) {
25324 if (status & cur_irq->pending_mask) {
25325 - atomic_inc(&cur_irq->irq_received);
25326 + atomic_inc_unchecked(&cur_irq->irq_received);
25327 DRM_WAKEUP(&cur_irq->irq_queue);
25328 handled = 1;
25329 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25330 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25331 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25332 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25333 masks[irq][4]));
25334 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25335 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25336 } else {
25337 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25338 (((cur_irq_sequence =
25339 - atomic_read(&cur_irq->irq_received)) -
25340 + atomic_read_unchecked(&cur_irq->irq_received)) -
25341 *sequence) <= (1 << 23)));
25342 }
25343 *sequence = cur_irq_sequence;
25344 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25345 }
25346
25347 for (i = 0; i < dev_priv->num_irqs; ++i) {
25348 - atomic_set(&cur_irq->irq_received, 0);
25349 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25350 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25351 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25352 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25353 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25354 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25355 case VIA_IRQ_RELATIVE:
25356 irqwait->request.sequence +=
25357 - atomic_read(&cur_irq->irq_received);
25358 + atomic_read_unchecked(&cur_irq->irq_received);
25359 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25360 case VIA_IRQ_ABSOLUTE:
25361 break;
25362 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25363 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400
25364 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-05 19:44:36.000000000 -0400
25365 @@ -240,7 +240,7 @@ struct vmw_private {
25366 * Fencing and IRQs.
25367 */
25368
25369 - atomic_t fence_seq;
25370 + atomic_unchecked_t fence_seq;
25371 wait_queue_head_t fence_queue;
25372 wait_queue_head_t fifo_queue;
25373 atomic_t fence_queue_waiters;
25374 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25375 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400
25376 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-05 19:44:36.000000000 -0400
25377 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25378 while (!vmw_lag_lt(queue, us)) {
25379 spin_lock(&queue->lock);
25380 if (list_empty(&queue->head))
25381 - sequence = atomic_read(&dev_priv->fence_seq);
25382 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25383 else {
25384 fence = list_first_entry(&queue->head,
25385 struct vmw_fence, head);
25386 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25387 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400
25388 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-05 20:34:06.000000000 -0400
25389 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25390 (unsigned int) min,
25391 (unsigned int) fifo->capabilities);
25392
25393 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25394 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25395 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25396 vmw_fence_queue_init(&fifo->fence_queue);
25397 return vmw_fifo_send_fence(dev_priv, &dummy);
25398 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25399
25400 fm = vmw_fifo_reserve(dev_priv, bytes);
25401 if (unlikely(fm == NULL)) {
25402 - *sequence = atomic_read(&dev_priv->fence_seq);
25403 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25404 ret = -ENOMEM;
25405 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25406 false, 3*HZ);
25407 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25408 }
25409
25410 do {
25411 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25412 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25413 } while (*sequence == 0);
25414
25415 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25416 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25417 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400
25418 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-05 19:44:36.000000000 -0400
25419 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25420 * emitted. Then the fence is stale and signaled.
25421 */
25422
25423 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25424 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25425 > VMW_FENCE_WRAP);
25426
25427 return ret;
25428 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25429
25430 if (fifo_idle)
25431 down_read(&fifo_state->rwsem);
25432 - signal_seq = atomic_read(&dev_priv->fence_seq);
25433 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25434 ret = 0;
25435
25436 for (;;) {
25437 diff -urNp linux-2.6.39.4/drivers/hid/hid-core.c linux-2.6.39.4/drivers/hid/hid-core.c
25438 --- linux-2.6.39.4/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400
25439 +++ linux-2.6.39.4/drivers/hid/hid-core.c 2011-08-05 19:44:36.000000000 -0400
25440 @@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device
25441
25442 int hid_add_device(struct hid_device *hdev)
25443 {
25444 - static atomic_t id = ATOMIC_INIT(0);
25445 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25446 int ret;
25447
25448 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25449 @@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd
25450 /* XXX hack, any other cleaner solution after the driver core
25451 * is converted to allow more than 20 bytes as the device name? */
25452 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25453 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25454 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25455
25456 hid_debug_register(hdev, dev_name(&hdev->dev));
25457 ret = device_add(&hdev->dev);
25458 diff -urNp linux-2.6.39.4/drivers/hid/usbhid/hiddev.c linux-2.6.39.4/drivers/hid/usbhid/hiddev.c
25459 --- linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400
25460 +++ linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-08-05 19:44:36.000000000 -0400
25461 @@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi
25462 break;
25463
25464 case HIDIOCAPPLICATION:
25465 - if (arg < 0 || arg >= hid->maxapplication)
25466 + if (arg >= hid->maxapplication)
25467 break;
25468
25469 for (i = 0; i < hid->maxcollection; i++)
25470 diff -urNp linux-2.6.39.4/drivers/hwmon/sht15.c linux-2.6.39.4/drivers/hwmon/sht15.c
25471 --- linux-2.6.39.4/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400
25472 +++ linux-2.6.39.4/drivers/hwmon/sht15.c 2011-08-05 19:44:36.000000000 -0400
25473 @@ -113,7 +113,7 @@ struct sht15_data {
25474 int supply_uV;
25475 int supply_uV_valid;
25476 struct work_struct update_supply_work;
25477 - atomic_t interrupt_handled;
25478 + atomic_unchecked_t interrupt_handled;
25479 };
25480
25481 /**
25482 @@ -246,13 +246,13 @@ static inline int sht15_update_single_va
25483 return ret;
25484
25485 gpio_direction_input(data->pdata->gpio_data);
25486 - atomic_set(&data->interrupt_handled, 0);
25487 + atomic_set_unchecked(&data->interrupt_handled, 0);
25488
25489 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25490 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25491 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25492 /* Only relevant if the interrupt hasn't occurred. */
25493 - if (!atomic_read(&data->interrupt_handled))
25494 + if (!atomic_read_unchecked(&data->interrupt_handled))
25495 schedule_work(&data->read_work);
25496 }
25497 ret = wait_event_timeout(data->wait_queue,
25498 @@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired
25499 struct sht15_data *data = d;
25500 /* First disable the interrupt */
25501 disable_irq_nosync(irq);
25502 - atomic_inc(&data->interrupt_handled);
25503 + atomic_inc_unchecked(&data->interrupt_handled);
25504 /* Then schedule a reading work struct */
25505 if (data->flag != SHT15_READING_NOTHING)
25506 schedule_work(&data->read_work);
25507 @@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo
25508 here as could have gone low in meantime so verify
25509 it hasn't!
25510 */
25511 - atomic_set(&data->interrupt_handled, 0);
25512 + atomic_set_unchecked(&data->interrupt_handled, 0);
25513 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25514 /* If still not occurred or another handler has been scheduled */
25515 if (gpio_get_value(data->pdata->gpio_data)
25516 - || atomic_read(&data->interrupt_handled))
25517 + || atomic_read_unchecked(&data->interrupt_handled))
25518 return;
25519 }
25520 /* Read the data back from the device */
25521 diff -urNp linux-2.6.39.4/drivers/hwmon/w83791d.c linux-2.6.39.4/drivers/hwmon/w83791d.c
25522 --- linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400
25523 +++ linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-08-05 19:44:36.000000000 -0400
25524 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25525 struct i2c_board_info *info);
25526 static int w83791d_remove(struct i2c_client *client);
25527
25528 -static int w83791d_read(struct i2c_client *client, u8 register);
25529 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25530 +static int w83791d_read(struct i2c_client *client, u8 reg);
25531 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25532 static struct w83791d_data *w83791d_update_device(struct device *dev);
25533
25534 #ifdef DEBUG
25535 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c
25536 --- linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-05-19 00:06:34.000000000 -0400
25537 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:34:06.000000000 -0400
25538 @@ -43,7 +43,7 @@
25539 extern struct i2c_adapter amd756_smbus;
25540
25541 static struct i2c_adapter *s4882_adapter;
25542 -static struct i2c_algorithm *s4882_algo;
25543 +static i2c_algorithm_no_const *s4882_algo;
25544
25545 /* Wrapper access functions for multiplexed SMBus */
25546 static DEFINE_MUTEX(amd756_lock);
25547 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25548 --- linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-05-19 00:06:34.000000000 -0400
25549 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:34:06.000000000 -0400
25550 @@ -41,7 +41,7 @@
25551 extern struct i2c_adapter *nforce2_smbus;
25552
25553 static struct i2c_adapter *s4985_adapter;
25554 -static struct i2c_algorithm *s4985_algo;
25555 +static i2c_algorithm_no_const *s4985_algo;
25556
25557 /* Wrapper access functions for multiplexed SMBus */
25558 static DEFINE_MUTEX(nforce2_lock);
25559 diff -urNp linux-2.6.39.4/drivers/i2c/i2c-mux.c linux-2.6.39.4/drivers/i2c/i2c-mux.c
25560 --- linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-05-19 00:06:34.000000000 -0400
25561 +++ linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-08-05 20:34:06.000000000 -0400
25562 @@ -28,7 +28,7 @@
25563 /* multiplexer per channel data */
25564 struct i2c_mux_priv {
25565 struct i2c_adapter adap;
25566 - struct i2c_algorithm algo;
25567 + i2c_algorithm_no_const algo;
25568
25569 struct i2c_adapter *parent;
25570 void *mux_dev; /* the mux chip/device */
25571 diff -urNp linux-2.6.39.4/drivers/ide/ide-cd.c linux-2.6.39.4/drivers/ide/ide-cd.c
25572 --- linux-2.6.39.4/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400
25573 +++ linux-2.6.39.4/drivers/ide/ide-cd.c 2011-08-05 19:44:36.000000000 -0400
25574 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25575 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25576 if ((unsigned long)buf & alignment
25577 || blk_rq_bytes(rq) & q->dma_pad_mask
25578 - || object_is_on_stack(buf))
25579 + || object_starts_on_stack(buf))
25580 drive->dma = 0;
25581 }
25582 }
25583 diff -urNp linux-2.6.39.4/drivers/ide/ide-floppy.c linux-2.6.39.4/drivers/ide/ide-floppy.c
25584 --- linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400
25585 +++ linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-08-05 19:44:36.000000000 -0400
25586 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25587 u8 pc_buf[256], header_len, desc_cnt;
25588 int i, rc = 1, blocks, length;
25589
25590 + pax_track_stack();
25591 +
25592 ide_debug_log(IDE_DBG_FUNC, "enter");
25593
25594 drive->bios_cyl = 0;
25595 diff -urNp linux-2.6.39.4/drivers/ide/setup-pci.c linux-2.6.39.4/drivers/ide/setup-pci.c
25596 --- linux-2.6.39.4/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 -0400
25597 +++ linux-2.6.39.4/drivers/ide/setup-pci.c 2011-08-05 19:44:36.000000000 -0400
25598 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25599 int ret, i, n_ports = dev2 ? 4 : 2;
25600 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25601
25602 + pax_track_stack();
25603 +
25604 for (i = 0; i < n_ports / 2; i++) {
25605 ret = ide_setup_pci_controller(pdev[i], d, !i);
25606 if (ret < 0)
25607 diff -urNp linux-2.6.39.4/drivers/infiniband/core/cm.c linux-2.6.39.4/drivers/infiniband/core/cm.c
25608 --- linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400
25609 +++ linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-08-05 19:44:36.000000000 -0400
25610 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25611
25612 struct cm_counter_group {
25613 struct kobject obj;
25614 - atomic_long_t counter[CM_ATTR_COUNT];
25615 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25616 };
25617
25618 struct cm_counter_attribute {
25619 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25620 struct ib_mad_send_buf *msg = NULL;
25621 int ret;
25622
25623 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25624 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25625 counter[CM_REQ_COUNTER]);
25626
25627 /* Quick state check to discard duplicate REQs. */
25628 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25629 if (!cm_id_priv)
25630 return;
25631
25632 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25633 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25634 counter[CM_REP_COUNTER]);
25635 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25636 if (ret)
25637 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25638 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25639 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25640 spin_unlock_irq(&cm_id_priv->lock);
25641 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25642 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25643 counter[CM_RTU_COUNTER]);
25644 goto out;
25645 }
25646 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25647 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25648 dreq_msg->local_comm_id);
25649 if (!cm_id_priv) {
25650 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25651 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25652 counter[CM_DREQ_COUNTER]);
25653 cm_issue_drep(work->port, work->mad_recv_wc);
25654 return -EINVAL;
25655 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25656 case IB_CM_MRA_REP_RCVD:
25657 break;
25658 case IB_CM_TIMEWAIT:
25659 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25660 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25661 counter[CM_DREQ_COUNTER]);
25662 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25663 goto unlock;
25664 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25665 cm_free_msg(msg);
25666 goto deref;
25667 case IB_CM_DREQ_RCVD:
25668 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25669 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25670 counter[CM_DREQ_COUNTER]);
25671 goto unlock;
25672 default:
25673 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25674 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25675 cm_id_priv->msg, timeout)) {
25676 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25677 - atomic_long_inc(&work->port->
25678 + atomic_long_inc_unchecked(&work->port->
25679 counter_group[CM_RECV_DUPLICATES].
25680 counter[CM_MRA_COUNTER]);
25681 goto out;
25682 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25683 break;
25684 case IB_CM_MRA_REQ_RCVD:
25685 case IB_CM_MRA_REP_RCVD:
25686 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25687 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25688 counter[CM_MRA_COUNTER]);
25689 /* fall through */
25690 default:
25691 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25692 case IB_CM_LAP_IDLE:
25693 break;
25694 case IB_CM_MRA_LAP_SENT:
25695 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25696 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25697 counter[CM_LAP_COUNTER]);
25698 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25699 goto unlock;
25700 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25701 cm_free_msg(msg);
25702 goto deref;
25703 case IB_CM_LAP_RCVD:
25704 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25705 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25706 counter[CM_LAP_COUNTER]);
25707 goto unlock;
25708 default:
25709 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25710 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25711 if (cur_cm_id_priv) {
25712 spin_unlock_irq(&cm.lock);
25713 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25714 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25715 counter[CM_SIDR_REQ_COUNTER]);
25716 goto out; /* Duplicate message. */
25717 }
25718 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25719 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25720 msg->retries = 1;
25721
25722 - atomic_long_add(1 + msg->retries,
25723 + atomic_long_add_unchecked(1 + msg->retries,
25724 &port->counter_group[CM_XMIT].counter[attr_index]);
25725 if (msg->retries)
25726 - atomic_long_add(msg->retries,
25727 + atomic_long_add_unchecked(msg->retries,
25728 &port->counter_group[CM_XMIT_RETRIES].
25729 counter[attr_index]);
25730
25731 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25732 }
25733
25734 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25735 - atomic_long_inc(&port->counter_group[CM_RECV].
25736 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25737 counter[attr_id - CM_ATTR_ID_OFFSET]);
25738
25739 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25740 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25741 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25742
25743 return sprintf(buf, "%ld\n",
25744 - atomic_long_read(&group->counter[cm_attr->index]));
25745 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25746 }
25747
25748 static const struct sysfs_ops cm_counter_ops = {
25749 diff -urNp linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c
25750 --- linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400
25751 +++ linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-08-05 19:44:36.000000000 -0400
25752 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25753
25754 struct task_struct *thread;
25755
25756 - atomic_t req_ser;
25757 - atomic_t flush_ser;
25758 + atomic_unchecked_t req_ser;
25759 + atomic_unchecked_t flush_ser;
25760
25761 wait_queue_head_t force_wait;
25762 };
25763 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25764 struct ib_fmr_pool *pool = pool_ptr;
25765
25766 do {
25767 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25768 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25769 ib_fmr_batch_release(pool);
25770
25771 - atomic_inc(&pool->flush_ser);
25772 + atomic_inc_unchecked(&pool->flush_ser);
25773 wake_up_interruptible(&pool->force_wait);
25774
25775 if (pool->flush_function)
25776 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25777 }
25778
25779 set_current_state(TASK_INTERRUPTIBLE);
25780 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25781 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25782 !kthread_should_stop())
25783 schedule();
25784 __set_current_state(TASK_RUNNING);
25785 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25786 pool->dirty_watermark = params->dirty_watermark;
25787 pool->dirty_len = 0;
25788 spin_lock_init(&pool->pool_lock);
25789 - atomic_set(&pool->req_ser, 0);
25790 - atomic_set(&pool->flush_ser, 0);
25791 + atomic_set_unchecked(&pool->req_ser, 0);
25792 + atomic_set_unchecked(&pool->flush_ser, 0);
25793 init_waitqueue_head(&pool->force_wait);
25794
25795 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25796 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25797 }
25798 spin_unlock_irq(&pool->pool_lock);
25799
25800 - serial = atomic_inc_return(&pool->req_ser);
25801 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25802 wake_up_process(pool->thread);
25803
25804 if (wait_event_interruptible(pool->force_wait,
25805 - atomic_read(&pool->flush_ser) - serial >= 0))
25806 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25807 return -EINTR;
25808
25809 return 0;
25810 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25811 } else {
25812 list_add_tail(&fmr->list, &pool->dirty_list);
25813 if (++pool->dirty_len >= pool->dirty_watermark) {
25814 - atomic_inc(&pool->req_ser);
25815 + atomic_inc_unchecked(&pool->req_ser);
25816 wake_up_process(pool->thread);
25817 }
25818 }
25819 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c
25820 --- linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400
25821 +++ linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-05 19:44:36.000000000 -0400
25822 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25823 int err;
25824 struct fw_ri_tpte tpt;
25825 u32 stag_idx;
25826 - static atomic_t key;
25827 + static atomic_unchecked_t key;
25828
25829 if (c4iw_fatal_error(rdev))
25830 return -EIO;
25831 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25832 &rdev->resource.tpt_fifo_lock);
25833 if (!stag_idx)
25834 return -ENOMEM;
25835 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25836 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25837 }
25838 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25839 __func__, stag_state, type, pdid, stag_idx);
25840 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c
25841 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400
25842 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-05 19:44:36.000000000 -0400
25843 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25844 struct infinipath_counters counters;
25845 struct ipath_devdata *dd;
25846
25847 + pax_track_stack();
25848 +
25849 dd = file->f_path.dentry->d_inode->i_private;
25850 dd->ipath_f_read_counters(dd, &counters);
25851
25852 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c
25853 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400
25854 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-05 19:44:36.000000000 -0400
25855 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25856 struct ib_atomic_eth *ateth;
25857 struct ipath_ack_entry *e;
25858 u64 vaddr;
25859 - atomic64_t *maddr;
25860 + atomic64_unchecked_t *maddr;
25861 u64 sdata;
25862 u32 rkey;
25863 u8 next;
25864 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25865 IB_ACCESS_REMOTE_ATOMIC)))
25866 goto nack_acc_unlck;
25867 /* Perform atomic OP and save result. */
25868 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25869 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25870 sdata = be64_to_cpu(ateth->swap_data);
25871 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25872 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25873 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25874 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25875 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25876 be64_to_cpu(ateth->compare_data),
25877 sdata);
25878 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25879 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400
25880 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-05 19:44:36.000000000 -0400
25881 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25882 unsigned long flags;
25883 struct ib_wc wc;
25884 u64 sdata;
25885 - atomic64_t *maddr;
25886 + atomic64_unchecked_t *maddr;
25887 enum ib_wc_status send_status;
25888
25889 /*
25890 @@ -382,11 +382,11 @@ again:
25891 IB_ACCESS_REMOTE_ATOMIC)))
25892 goto acc_err;
25893 /* Perform atomic OP and save result. */
25894 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25895 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25896 sdata = wqe->wr.wr.atomic.compare_add;
25897 *(u64 *) sqp->s_sge.sge.vaddr =
25898 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25899 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25900 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25901 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25902 sdata, wqe->wr.wr.atomic.swap);
25903 goto send_comp;
25904 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c
25905 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400
25906 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-08-05 19:44:36.000000000 -0400
25907 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25908 LIST_HEAD(nes_adapter_list);
25909 static LIST_HEAD(nes_dev_list);
25910
25911 -atomic_t qps_destroyed;
25912 +atomic_unchecked_t qps_destroyed;
25913
25914 static unsigned int ee_flsh_adapter;
25915 static unsigned int sysfs_nonidx_addr;
25916 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25917 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25918 struct nes_adapter *nesadapter = nesdev->nesadapter;
25919
25920 - atomic_inc(&qps_destroyed);
25921 + atomic_inc_unchecked(&qps_destroyed);
25922
25923 /* Free the control structures */
25924
25925 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c
25926 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400
25927 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-05 19:44:36.000000000 -0400
25928 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25929 u32 cm_packets_retrans;
25930 u32 cm_packets_created;
25931 u32 cm_packets_received;
25932 -atomic_t cm_listens_created;
25933 -atomic_t cm_listens_destroyed;
25934 +atomic_unchecked_t cm_listens_created;
25935 +atomic_unchecked_t cm_listens_destroyed;
25936 u32 cm_backlog_drops;
25937 -atomic_t cm_loopbacks;
25938 -atomic_t cm_nodes_created;
25939 -atomic_t cm_nodes_destroyed;
25940 -atomic_t cm_accel_dropped_pkts;
25941 -atomic_t cm_resets_recvd;
25942 +atomic_unchecked_t cm_loopbacks;
25943 +atomic_unchecked_t cm_nodes_created;
25944 +atomic_unchecked_t cm_nodes_destroyed;
25945 +atomic_unchecked_t cm_accel_dropped_pkts;
25946 +atomic_unchecked_t cm_resets_recvd;
25947
25948 static inline int mini_cm_accelerated(struct nes_cm_core *,
25949 struct nes_cm_node *);
25950 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25951
25952 static struct nes_cm_core *g_cm_core;
25953
25954 -atomic_t cm_connects;
25955 -atomic_t cm_accepts;
25956 -atomic_t cm_disconnects;
25957 -atomic_t cm_closes;
25958 -atomic_t cm_connecteds;
25959 -atomic_t cm_connect_reqs;
25960 -atomic_t cm_rejects;
25961 +atomic_unchecked_t cm_connects;
25962 +atomic_unchecked_t cm_accepts;
25963 +atomic_unchecked_t cm_disconnects;
25964 +atomic_unchecked_t cm_closes;
25965 +atomic_unchecked_t cm_connecteds;
25966 +atomic_unchecked_t cm_connect_reqs;
25967 +atomic_unchecked_t cm_rejects;
25968
25969
25970 /**
25971 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25972 kfree(listener);
25973 listener = NULL;
25974 ret = 0;
25975 - atomic_inc(&cm_listens_destroyed);
25976 + atomic_inc_unchecked(&cm_listens_destroyed);
25977 } else {
25978 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25979 }
25980 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25981 cm_node->rem_mac);
25982
25983 add_hte_node(cm_core, cm_node);
25984 - atomic_inc(&cm_nodes_created);
25985 + atomic_inc_unchecked(&cm_nodes_created);
25986
25987 return cm_node;
25988 }
25989 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25990 }
25991
25992 atomic_dec(&cm_core->node_cnt);
25993 - atomic_inc(&cm_nodes_destroyed);
25994 + atomic_inc_unchecked(&cm_nodes_destroyed);
25995 nesqp = cm_node->nesqp;
25996 if (nesqp) {
25997 nesqp->cm_node = NULL;
25998 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25999
26000 static void drop_packet(struct sk_buff *skb)
26001 {
26002 - atomic_inc(&cm_accel_dropped_pkts);
26003 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26004 dev_kfree_skb_any(skb);
26005 }
26006
26007 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
26008 {
26009
26010 int reset = 0; /* whether to send reset in case of err.. */
26011 - atomic_inc(&cm_resets_recvd);
26012 + atomic_inc_unchecked(&cm_resets_recvd);
26013 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
26014 " refcnt=%d\n", cm_node, cm_node->state,
26015 atomic_read(&cm_node->ref_count));
26016 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
26017 rem_ref_cm_node(cm_node->cm_core, cm_node);
26018 return NULL;
26019 }
26020 - atomic_inc(&cm_loopbacks);
26021 + atomic_inc_unchecked(&cm_loopbacks);
26022 loopbackremotenode->loopbackpartner = cm_node;
26023 loopbackremotenode->tcp_cntxt.rcv_wscale =
26024 NES_CM_DEFAULT_RCV_WND_SCALE;
26025 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26026 add_ref_cm_node(cm_node);
26027 } else if (cm_node->state == NES_CM_STATE_TSA) {
26028 rem_ref_cm_node(cm_core, cm_node);
26029 - atomic_inc(&cm_accel_dropped_pkts);
26030 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26031 dev_kfree_skb_any(skb);
26032 break;
26033 }
26034 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26035
26036 if ((cm_id) && (cm_id->event_handler)) {
26037 if (issue_disconn) {
26038 - atomic_inc(&cm_disconnects);
26039 + atomic_inc_unchecked(&cm_disconnects);
26040 cm_event.event = IW_CM_EVENT_DISCONNECT;
26041 cm_event.status = disconn_status;
26042 cm_event.local_addr = cm_id->local_addr;
26043 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26044 }
26045
26046 if (issue_close) {
26047 - atomic_inc(&cm_closes);
26048 + atomic_inc_unchecked(&cm_closes);
26049 nes_disconnect(nesqp, 1);
26050
26051 cm_id->provider_data = nesqp;
26052 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26053
26054 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26055 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26056 - atomic_inc(&cm_accepts);
26057 + atomic_inc_unchecked(&cm_accepts);
26058
26059 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26060 netdev_refcnt_read(nesvnic->netdev));
26061 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26062
26063 struct nes_cm_core *cm_core;
26064
26065 - atomic_inc(&cm_rejects);
26066 + atomic_inc_unchecked(&cm_rejects);
26067 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26068 loopback = cm_node->loopbackpartner;
26069 cm_core = cm_node->cm_core;
26070 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26071 ntohl(cm_id->local_addr.sin_addr.s_addr),
26072 ntohs(cm_id->local_addr.sin_port));
26073
26074 - atomic_inc(&cm_connects);
26075 + atomic_inc_unchecked(&cm_connects);
26076 nesqp->active_conn = 1;
26077
26078 /* cache the cm_id in the qp */
26079 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26080 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26081 return err;
26082 }
26083 - atomic_inc(&cm_listens_created);
26084 + atomic_inc_unchecked(&cm_listens_created);
26085 }
26086
26087 cm_id->add_ref(cm_id);
26088 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26089 if (nesqp->destroyed) {
26090 return;
26091 }
26092 - atomic_inc(&cm_connecteds);
26093 + atomic_inc_unchecked(&cm_connecteds);
26094 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26095 " local port 0x%04X. jiffies = %lu.\n",
26096 nesqp->hwqp.qp_id,
26097 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26098
26099 cm_id->add_ref(cm_id);
26100 ret = cm_id->event_handler(cm_id, &cm_event);
26101 - atomic_inc(&cm_closes);
26102 + atomic_inc_unchecked(&cm_closes);
26103 cm_event.event = IW_CM_EVENT_CLOSE;
26104 cm_event.status = IW_CM_EVENT_STATUS_OK;
26105 cm_event.provider_data = cm_id->provider_data;
26106 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26107 return;
26108 cm_id = cm_node->cm_id;
26109
26110 - atomic_inc(&cm_connect_reqs);
26111 + atomic_inc_unchecked(&cm_connect_reqs);
26112 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26113 cm_node, cm_id, jiffies);
26114
26115 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26116 return;
26117 cm_id = cm_node->cm_id;
26118
26119 - atomic_inc(&cm_connect_reqs);
26120 + atomic_inc_unchecked(&cm_connect_reqs);
26121 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26122 cm_node, cm_id, jiffies);
26123
26124 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h
26125 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400
26126 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-08-05 19:44:36.000000000 -0400
26127 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26128 extern unsigned int wqm_quanta;
26129 extern struct list_head nes_adapter_list;
26130
26131 -extern atomic_t cm_connects;
26132 -extern atomic_t cm_accepts;
26133 -extern atomic_t cm_disconnects;
26134 -extern atomic_t cm_closes;
26135 -extern atomic_t cm_connecteds;
26136 -extern atomic_t cm_connect_reqs;
26137 -extern atomic_t cm_rejects;
26138 -extern atomic_t mod_qp_timouts;
26139 -extern atomic_t qps_created;
26140 -extern atomic_t qps_destroyed;
26141 -extern atomic_t sw_qps_destroyed;
26142 +extern atomic_unchecked_t cm_connects;
26143 +extern atomic_unchecked_t cm_accepts;
26144 +extern atomic_unchecked_t cm_disconnects;
26145 +extern atomic_unchecked_t cm_closes;
26146 +extern atomic_unchecked_t cm_connecteds;
26147 +extern atomic_unchecked_t cm_connect_reqs;
26148 +extern atomic_unchecked_t cm_rejects;
26149 +extern atomic_unchecked_t mod_qp_timouts;
26150 +extern atomic_unchecked_t qps_created;
26151 +extern atomic_unchecked_t qps_destroyed;
26152 +extern atomic_unchecked_t sw_qps_destroyed;
26153 extern u32 mh_detected;
26154 extern u32 mh_pauses_sent;
26155 extern u32 cm_packets_sent;
26156 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26157 extern u32 cm_packets_received;
26158 extern u32 cm_packets_dropped;
26159 extern u32 cm_packets_retrans;
26160 -extern atomic_t cm_listens_created;
26161 -extern atomic_t cm_listens_destroyed;
26162 +extern atomic_unchecked_t cm_listens_created;
26163 +extern atomic_unchecked_t cm_listens_destroyed;
26164 extern u32 cm_backlog_drops;
26165 -extern atomic_t cm_loopbacks;
26166 -extern atomic_t cm_nodes_created;
26167 -extern atomic_t cm_nodes_destroyed;
26168 -extern atomic_t cm_accel_dropped_pkts;
26169 -extern atomic_t cm_resets_recvd;
26170 +extern atomic_unchecked_t cm_loopbacks;
26171 +extern atomic_unchecked_t cm_nodes_created;
26172 +extern atomic_unchecked_t cm_nodes_destroyed;
26173 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26174 +extern atomic_unchecked_t cm_resets_recvd;
26175
26176 extern u32 int_mod_timer_init;
26177 extern u32 int_mod_cq_depth_256;
26178 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c
26179 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-05-19 00:06:34.000000000 -0400
26180 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-05 19:44:36.000000000 -0400
26181 @@ -1302,31 +1302,31 @@ static void nes_netdev_get_ethtool_stats
26182 target_stat_values[++index] = mh_detected;
26183 target_stat_values[++index] = mh_pauses_sent;
26184 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26185 - target_stat_values[++index] = atomic_read(&cm_connects);
26186 - target_stat_values[++index] = atomic_read(&cm_accepts);
26187 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26188 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26189 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26190 - target_stat_values[++index] = atomic_read(&cm_rejects);
26191 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26192 - target_stat_values[++index] = atomic_read(&qps_created);
26193 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26194 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26195 - target_stat_values[++index] = atomic_read(&cm_closes);
26196 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26197 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26198 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26199 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26200 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26201 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26202 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26203 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26204 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26205 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26206 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26207 target_stat_values[++index] = cm_packets_sent;
26208 target_stat_values[++index] = cm_packets_bounced;
26209 target_stat_values[++index] = cm_packets_created;
26210 target_stat_values[++index] = cm_packets_received;
26211 target_stat_values[++index] = cm_packets_dropped;
26212 target_stat_values[++index] = cm_packets_retrans;
26213 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26214 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26215 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26216 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26217 target_stat_values[++index] = cm_backlog_drops;
26218 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26219 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26220 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26221 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26222 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26223 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26224 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26225 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26226 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26227 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26228 target_stat_values[++index] = nesadapter->free_4kpbl;
26229 target_stat_values[++index] = nesadapter->free_256pbl;
26230 target_stat_values[++index] = int_mod_timer_init;
26231 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c
26232 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-19 00:06:34.000000000 -0400
26233 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-05 19:44:36.000000000 -0400
26234 @@ -46,9 +46,9 @@
26235
26236 #include <rdma/ib_umem.h>
26237
26238 -atomic_t mod_qp_timouts;
26239 -atomic_t qps_created;
26240 -atomic_t sw_qps_destroyed;
26241 +atomic_unchecked_t mod_qp_timouts;
26242 +atomic_unchecked_t qps_created;
26243 +atomic_unchecked_t sw_qps_destroyed;
26244
26245 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26246
26247 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26248 if (init_attr->create_flags)
26249 return ERR_PTR(-EINVAL);
26250
26251 - atomic_inc(&qps_created);
26252 + atomic_inc_unchecked(&qps_created);
26253 switch (init_attr->qp_type) {
26254 case IB_QPT_RC:
26255 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26256 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26257 struct iw_cm_event cm_event;
26258 int ret;
26259
26260 - atomic_inc(&sw_qps_destroyed);
26261 + atomic_inc_unchecked(&sw_qps_destroyed);
26262 nesqp->destroyed = 1;
26263
26264 /* Blow away the connection if it exists. */
26265 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h
26266 --- linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-05-19 00:06:34.000000000 -0400
26267 +++ linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-08-05 20:34:06.000000000 -0400
26268 @@ -51,6 +51,7 @@
26269 #include <linux/completion.h>
26270 #include <linux/kref.h>
26271 #include <linux/sched.h>
26272 +#include <linux/slab.h>
26273
26274 #include "qib_common.h"
26275 #include "qib_verbs.h"
26276 diff -urNp linux-2.6.39.4/drivers/input/gameport/gameport.c linux-2.6.39.4/drivers/input/gameport/gameport.c
26277 --- linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-05-19 00:06:34.000000000 -0400
26278 +++ linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-08-05 19:44:37.000000000 -0400
26279 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26280 */
26281 static void gameport_init_port(struct gameport *gameport)
26282 {
26283 - static atomic_t gameport_no = ATOMIC_INIT(0);
26284 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26285
26286 __module_get(THIS_MODULE);
26287
26288 mutex_init(&gameport->drv_mutex);
26289 device_initialize(&gameport->dev);
26290 dev_set_name(&gameport->dev, "gameport%lu",
26291 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26292 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26293 gameport->dev.bus = &gameport_bus;
26294 gameport->dev.release = gameport_release_port;
26295 if (gameport->parent)
26296 diff -urNp linux-2.6.39.4/drivers/input/input.c linux-2.6.39.4/drivers/input/input.c
26297 --- linux-2.6.39.4/drivers/input/input.c 2011-07-09 09:18:51.000000000 -0400
26298 +++ linux-2.6.39.4/drivers/input/input.c 2011-08-05 19:44:37.000000000 -0400
26299 @@ -1815,7 +1815,7 @@ static void input_cleanse_bitmasks(struc
26300 */
26301 int input_register_device(struct input_dev *dev)
26302 {
26303 - static atomic_t input_no = ATOMIC_INIT(0);
26304 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26305 struct input_handler *handler;
26306 const char *path;
26307 int error;
26308 @@ -1852,7 +1852,7 @@ int input_register_device(struct input_d
26309 dev->setkeycode = input_default_setkeycode;
26310
26311 dev_set_name(&dev->dev, "input%ld",
26312 - (unsigned long) atomic_inc_return(&input_no) - 1);
26313 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26314
26315 error = device_add(&dev->dev);
26316 if (error)
26317 diff -urNp linux-2.6.39.4/drivers/input/joystick/sidewinder.c linux-2.6.39.4/drivers/input/joystick/sidewinder.c
26318 --- linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-05-19 00:06:34.000000000 -0400
26319 +++ linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-08-05 19:44:37.000000000 -0400
26320 @@ -30,6 +30,7 @@
26321 #include <linux/kernel.h>
26322 #include <linux/module.h>
26323 #include <linux/slab.h>
26324 +#include <linux/sched.h>
26325 #include <linux/init.h>
26326 #include <linux/input.h>
26327 #include <linux/gameport.h>
26328 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26329 unsigned char buf[SW_LENGTH];
26330 int i;
26331
26332 + pax_track_stack();
26333 +
26334 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26335
26336 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26337 diff -urNp linux-2.6.39.4/drivers/input/joystick/xpad.c linux-2.6.39.4/drivers/input/joystick/xpad.c
26338 --- linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-05-19 00:06:34.000000000 -0400
26339 +++ linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-08-05 19:44:37.000000000 -0400
26340 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26341
26342 static int xpad_led_probe(struct usb_xpad *xpad)
26343 {
26344 - static atomic_t led_seq = ATOMIC_INIT(0);
26345 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26346 long led_no;
26347 struct xpad_led *led;
26348 struct led_classdev *led_cdev;
26349 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26350 if (!led)
26351 return -ENOMEM;
26352
26353 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26354 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26355
26356 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26357 led->xpad = xpad;
26358 diff -urNp linux-2.6.39.4/drivers/input/mousedev.c linux-2.6.39.4/drivers/input/mousedev.c
26359 --- linux-2.6.39.4/drivers/input/mousedev.c 2011-07-09 09:18:51.000000000 -0400
26360 +++ linux-2.6.39.4/drivers/input/mousedev.c 2011-08-05 19:44:37.000000000 -0400
26361 @@ -764,7 +764,7 @@ static ssize_t mousedev_read(struct file
26362
26363 spin_unlock_irq(&client->packet_lock);
26364
26365 - if (copy_to_user(buffer, data, count))
26366 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26367 return -EFAULT;
26368
26369 return count;
26370 diff -urNp linux-2.6.39.4/drivers/input/serio/serio.c linux-2.6.39.4/drivers/input/serio/serio.c
26371 --- linux-2.6.39.4/drivers/input/serio/serio.c 2011-05-19 00:06:34.000000000 -0400
26372 +++ linux-2.6.39.4/drivers/input/serio/serio.c 2011-08-05 19:44:37.000000000 -0400
26373 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26374 */
26375 static void serio_init_port(struct serio *serio)
26376 {
26377 - static atomic_t serio_no = ATOMIC_INIT(0);
26378 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26379
26380 __module_get(THIS_MODULE);
26381
26382 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26383 mutex_init(&serio->drv_mutex);
26384 device_initialize(&serio->dev);
26385 dev_set_name(&serio->dev, "serio%ld",
26386 - (long)atomic_inc_return(&serio_no) - 1);
26387 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26388 serio->dev.bus = &serio_bus;
26389 serio->dev.release = serio_release_port;
26390 serio->dev.groups = serio_device_attr_groups;
26391 diff -urNp linux-2.6.39.4/drivers/isdn/capi/capi.c linux-2.6.39.4/drivers/isdn/capi/capi.c
26392 --- linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-05-19 00:06:34.000000000 -0400
26393 +++ linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-08-05 19:44:37.000000000 -0400
26394 @@ -89,8 +89,8 @@ struct capiminor {
26395
26396 struct capi20_appl *ap;
26397 u32 ncci;
26398 - atomic_t datahandle;
26399 - atomic_t msgid;
26400 + atomic_unchecked_t datahandle;
26401 + atomic_unchecked_t msgid;
26402
26403 struct tty_port port;
26404 int ttyinstop;
26405 @@ -414,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *m
26406 capimsg_setu16(s, 2, mp->ap->applid);
26407 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26408 capimsg_setu8 (s, 5, CAPI_RESP);
26409 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26410 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26411 capimsg_setu32(s, 8, mp->ncci);
26412 capimsg_setu16(s, 12, datahandle);
26413 }
26414 @@ -547,14 +547,14 @@ static void handle_minor_send(struct cap
26415 mp->outbytes -= len;
26416 spin_unlock_bh(&mp->outlock);
26417
26418 - datahandle = atomic_inc_return(&mp->datahandle);
26419 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26420 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26421 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26422 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26423 capimsg_setu16(skb->data, 2, mp->ap->applid);
26424 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26425 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26426 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26427 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26428 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26429 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26430 capimsg_setu16(skb->data, 16, len); /* Data length */
26431 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/common.c linux-2.6.39.4/drivers/isdn/gigaset/common.c
26432 --- linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-05-19 00:06:34.000000000 -0400
26433 +++ linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-08-05 19:44:37.000000000 -0400
26434 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26435 cs->commands_pending = 0;
26436 cs->cur_at_seq = 0;
26437 cs->gotfwver = -1;
26438 - cs->open_count = 0;
26439 + local_set(&cs->open_count, 0);
26440 cs->dev = NULL;
26441 cs->tty = NULL;
26442 cs->tty_dev = NULL;
26443 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h
26444 --- linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-05-19 00:06:34.000000000 -0400
26445 +++ linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-08-05 19:44:37.000000000 -0400
26446 @@ -35,6 +35,7 @@
26447 #include <linux/tty_driver.h>
26448 #include <linux/list.h>
26449 #include <asm/atomic.h>
26450 +#include <asm/local.h>
26451
26452 #define GIG_VERSION {0, 5, 0, 0}
26453 #define GIG_COMPAT {0, 4, 0, 0}
26454 @@ -433,7 +434,7 @@ struct cardstate {
26455 spinlock_t cmdlock;
26456 unsigned curlen, cmdbytes;
26457
26458 - unsigned open_count;
26459 + local_t open_count;
26460 struct tty_struct *tty;
26461 struct tasklet_struct if_wake_tasklet;
26462 unsigned control_state;
26463 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/interface.c linux-2.6.39.4/drivers/isdn/gigaset/interface.c
26464 --- linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-05-19 00:06:34.000000000 -0400
26465 +++ linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-08-05 19:44:37.000000000 -0400
26466 @@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
26467 return -ERESTARTSYS;
26468 tty->driver_data = cs;
26469
26470 - ++cs->open_count;
26471 -
26472 - if (cs->open_count == 1) {
26473 + if (local_inc_return(&cs->open_count) == 1) {
26474 spin_lock_irqsave(&cs->lock, flags);
26475 cs->tty = tty;
26476 spin_unlock_irqrestore(&cs->lock, flags);
26477 @@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
26478
26479 if (!cs->connected)
26480 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26481 - else if (!cs->open_count)
26482 + else if (!local_read(&cs->open_count))
26483 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26484 else {
26485 - if (!--cs->open_count) {
26486 + if (!local_dec_return(&cs->open_count)) {
26487 spin_lock_irqsave(&cs->lock, flags);
26488 cs->tty = NULL;
26489 spin_unlock_irqrestore(&cs->lock, flags);
26490 @@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
26491 if (!cs->connected) {
26492 gig_dbg(DEBUG_IF, "not connected");
26493 retval = -ENODEV;
26494 - } else if (!cs->open_count)
26495 + } else if (!local_read(&cs->open_count))
26496 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26497 else {
26498 retval = 0;
26499 @@ -358,7 +356,7 @@ static int if_write(struct tty_struct *t
26500 retval = -ENODEV;
26501 goto done;
26502 }
26503 - if (!cs->open_count) {
26504 + if (!local_read(&cs->open_count)) {
26505 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26506 retval = -ENODEV;
26507 goto done;
26508 @@ -411,7 +409,7 @@ static int if_write_room(struct tty_stru
26509 if (!cs->connected) {
26510 gig_dbg(DEBUG_IF, "not connected");
26511 retval = -ENODEV;
26512 - } else if (!cs->open_count)
26513 + } else if (!local_read(&cs->open_count))
26514 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26515 else if (cs->mstate != MS_LOCKED) {
26516 dev_warn(cs->dev, "can't write to unlocked device\n");
26517 @@ -441,7 +439,7 @@ static int if_chars_in_buffer(struct tty
26518
26519 if (!cs->connected)
26520 gig_dbg(DEBUG_IF, "not connected");
26521 - else if (!cs->open_count)
26522 + else if (!local_read(&cs->open_count))
26523 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26524 else if (cs->mstate != MS_LOCKED)
26525 dev_warn(cs->dev, "can't write to unlocked device\n");
26526 @@ -469,7 +467,7 @@ static void if_throttle(struct tty_struc
26527
26528 if (!cs->connected)
26529 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26530 - else if (!cs->open_count)
26531 + else if (!local_read(&cs->open_count))
26532 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26533 else
26534 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26535 @@ -493,7 +491,7 @@ static void if_unthrottle(struct tty_str
26536
26537 if (!cs->connected)
26538 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26539 - else if (!cs->open_count)
26540 + else if (!local_read(&cs->open_count))
26541 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26542 else
26543 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26544 @@ -524,7 +522,7 @@ static void if_set_termios(struct tty_st
26545 goto out;
26546 }
26547
26548 - if (!cs->open_count) {
26549 + if (!local_read(&cs->open_count)) {
26550 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26551 goto out;
26552 }
26553 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c
26554 --- linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-05-19 00:06:34.000000000 -0400
26555 +++ linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-08-05 19:44:37.000000000 -0400
26556 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26557 }
26558 if (left) {
26559 if (t4file->user) {
26560 - if (copy_from_user(buf, dp, left))
26561 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26562 return -EFAULT;
26563 } else {
26564 memcpy(buf, dp, left);
26565 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26566 }
26567 if (left) {
26568 if (config->user) {
26569 - if (copy_from_user(buf, dp, left))
26570 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26571 return -EFAULT;
26572 } else {
26573 memcpy(buf, dp, left);
26574 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c
26575 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-19 00:06:34.000000000 -0400
26576 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-05 19:44:37.000000000 -0400
26577 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26578 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26579 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26580
26581 + pax_track_stack();
26582
26583 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26584 {
26585 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c
26586 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-05-19 00:06:34.000000000 -0400
26587 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-05 19:44:37.000000000 -0400
26588 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26589 IDI_SYNC_REQ req;
26590 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26591
26592 + pax_track_stack();
26593 +
26594 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26595
26596 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26597 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c
26598 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-19 00:06:34.000000000 -0400
26599 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-05 19:44:37.000000000 -0400
26600 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26601 IDI_SYNC_REQ req;
26602 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26603
26604 + pax_track_stack();
26605 +
26606 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26607
26608 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26609 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c
26610 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-19 00:06:34.000000000 -0400
26611 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-05 19:44:37.000000000 -0400
26612 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
26613 IDI_SYNC_REQ req;
26614 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26615
26616 + pax_track_stack();
26617 +
26618 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26619
26620 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26621 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h
26622 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-05-19 00:06:34.000000000 -0400
26623 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:34:06.000000000 -0400
26624 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26625 } diva_didd_add_adapter_t;
26626 typedef struct _diva_didd_remove_adapter {
26627 IDI_CALL p_request;
26628 -} diva_didd_remove_adapter_t;
26629 +} __no_const diva_didd_remove_adapter_t;
26630 typedef struct _diva_didd_read_adapter_array {
26631 void * buffer;
26632 dword length;
26633 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c
26634 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-05-19 00:06:34.000000000 -0400
26635 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-05 19:44:37.000000000 -0400
26636 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26637 IDI_SYNC_REQ req;
26638 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26639
26640 + pax_track_stack();
26641 +
26642 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26643
26644 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26645 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c
26646 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-05-19 00:06:34.000000000 -0400
26647 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-08-05 19:44:37.000000000 -0400
26648 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
26649 dword d;
26650 word w;
26651
26652 + pax_track_stack();
26653 +
26654 a = plci->adapter;
26655 Id = ((word)plci->Id<<8)|a->Id;
26656 PUT_WORD(&SS_Ind[4],0x0000);
26657 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
26658 word j, n, w;
26659 dword d;
26660
26661 + pax_track_stack();
26662 +
26663
26664 for(i=0;i<8;i++) bp_parms[i].length = 0;
26665 for(i=0;i<2;i++) global_config[i].length = 0;
26666 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
26667 const byte llc3[] = {4,3,2,2,6,6,0};
26668 const byte header[] = {0,2,3,3,0,0,0};
26669
26670 + pax_track_stack();
26671 +
26672 for(i=0;i<8;i++) bp_parms[i].length = 0;
26673 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26674 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26675 @@ -14760,6 +14766,8 @@ static void group_optimization(DIVA_CAPI
26676 word appl_number_group_type[MAX_APPL];
26677 PLCI *auxplci;
26678
26679 + pax_track_stack();
26680 +
26681 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26682
26683 if(!a->group_optimization_enabled)
26684 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c
26685 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-19 00:06:34.000000000 -0400
26686 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-05 19:44:37.000000000 -0400
26687 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26688 IDI_SYNC_REQ req;
26689 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26690
26691 + pax_track_stack();
26692 +
26693 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26694
26695 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26696 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26697 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-05-19 00:06:34.000000000 -0400
26698 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:34:06.000000000 -0400
26699 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26700 typedef struct _diva_os_idi_adapter_interface {
26701 diva_init_card_proc_t cleanup_adapter_proc;
26702 diva_cmd_card_proc_t cmd_proc;
26703 -} diva_os_idi_adapter_interface_t;
26704 +} __no_const diva_os_idi_adapter_interface_t;
26705
26706 typedef struct _diva_os_xdi_adapter {
26707 struct list_head link;
26708 diff -urNp linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c
26709 --- linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-05-19 00:06:34.000000000 -0400
26710 +++ linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-08-05 19:44:37.000000000 -0400
26711 @@ -1292,6 +1292,8 @@ isdn_ioctl(struct file *file, uint cmd,
26712 } iocpar;
26713 void __user *argp = (void __user *)arg;
26714
26715 + pax_track_stack();
26716 +
26717 #define name iocpar.name
26718 #define bname iocpar.bname
26719 #define iocts iocpar.iocts
26720 diff -urNp linux-2.6.39.4/drivers/isdn/icn/icn.c linux-2.6.39.4/drivers/isdn/icn/icn.c
26721 --- linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-05-19 00:06:34.000000000 -0400
26722 +++ linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-08-05 19:44:37.000000000 -0400
26723 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26724 if (count > len)
26725 count = len;
26726 if (user) {
26727 - if (copy_from_user(msg, buf, count))
26728 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26729 return -EFAULT;
26730 } else
26731 memcpy(msg, buf, count);
26732 diff -urNp linux-2.6.39.4/drivers/lguest/core.c linux-2.6.39.4/drivers/lguest/core.c
26733 --- linux-2.6.39.4/drivers/lguest/core.c 2011-05-19 00:06:34.000000000 -0400
26734 +++ linux-2.6.39.4/drivers/lguest/core.c 2011-08-05 19:44:37.000000000 -0400
26735 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26736 * it's worked so far. The end address needs +1 because __get_vm_area
26737 * allocates an extra guard page, so we need space for that.
26738 */
26739 +
26740 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26741 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26742 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26743 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26744 +#else
26745 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26746 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26747 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26748 +#endif
26749 +
26750 if (!switcher_vma) {
26751 err = -ENOMEM;
26752 printk("lguest: could not map switcher pages high\n");
26753 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26754 * Now the Switcher is mapped at the right address, we can't fail!
26755 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26756 */
26757 - memcpy(switcher_vma->addr, start_switcher_text,
26758 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26759 end_switcher_text - start_switcher_text);
26760
26761 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26762 diff -urNp linux-2.6.39.4/drivers/lguest/x86/core.c linux-2.6.39.4/drivers/lguest/x86/core.c
26763 --- linux-2.6.39.4/drivers/lguest/x86/core.c 2011-05-19 00:06:34.000000000 -0400
26764 +++ linux-2.6.39.4/drivers/lguest/x86/core.c 2011-08-05 19:44:37.000000000 -0400
26765 @@ -59,7 +59,7 @@ static struct {
26766 /* Offset from where switcher.S was compiled to where we've copied it */
26767 static unsigned long switcher_offset(void)
26768 {
26769 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26770 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26771 }
26772
26773 /* This cpu's struct lguest_pages. */
26774 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26775 * These copies are pretty cheap, so we do them unconditionally: */
26776 /* Save the current Host top-level page directory.
26777 */
26778 +
26779 +#ifdef CONFIG_PAX_PER_CPU_PGD
26780 + pages->state.host_cr3 = read_cr3();
26781 +#else
26782 pages->state.host_cr3 = __pa(current->mm->pgd);
26783 +#endif
26784 +
26785 /*
26786 * Set up the Guest's page tables to see this CPU's pages (and no
26787 * other CPU's pages).
26788 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26789 * compiled-in switcher code and the high-mapped copy we just made.
26790 */
26791 for (i = 0; i < IDT_ENTRIES; i++)
26792 - default_idt_entries[i] += switcher_offset();
26793 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26794
26795 /*
26796 * Set up the Switcher's per-cpu areas.
26797 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26798 * it will be undisturbed when we switch. To change %cs and jump we
26799 * need this structure to feed to Intel's "lcall" instruction.
26800 */
26801 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26802 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26803 lguest_entry.segment = LGUEST_CS;
26804
26805 /*
26806 diff -urNp linux-2.6.39.4/drivers/lguest/x86/switcher_32.S linux-2.6.39.4/drivers/lguest/x86/switcher_32.S
26807 --- linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-05-19 00:06:34.000000000 -0400
26808 +++ linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-08-05 19:44:37.000000000 -0400
26809 @@ -87,6 +87,7 @@
26810 #include <asm/page.h>
26811 #include <asm/segment.h>
26812 #include <asm/lguest.h>
26813 +#include <asm/processor-flags.h>
26814
26815 // We mark the start of the code to copy
26816 // It's placed in .text tho it's never run here
26817 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26818 // Changes type when we load it: damn Intel!
26819 // For after we switch over our page tables
26820 // That entry will be read-only: we'd crash.
26821 +
26822 +#ifdef CONFIG_PAX_KERNEXEC
26823 + mov %cr0, %edx
26824 + xor $X86_CR0_WP, %edx
26825 + mov %edx, %cr0
26826 +#endif
26827 +
26828 movl $(GDT_ENTRY_TSS*8), %edx
26829 ltr %dx
26830
26831 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26832 // Let's clear it again for our return.
26833 // The GDT descriptor of the Host
26834 // Points to the table after two "size" bytes
26835 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26836 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26837 // Clear "used" from type field (byte 5, bit 2)
26838 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26839 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26840 +
26841 +#ifdef CONFIG_PAX_KERNEXEC
26842 + mov %cr0, %eax
26843 + xor $X86_CR0_WP, %eax
26844 + mov %eax, %cr0
26845 +#endif
26846
26847 // Once our page table's switched, the Guest is live!
26848 // The Host fades as we run this final step.
26849 @@ -295,13 +309,12 @@ deliver_to_host:
26850 // I consulted gcc, and it gave
26851 // These instructions, which I gladly credit:
26852 leal (%edx,%ebx,8), %eax
26853 - movzwl (%eax),%edx
26854 - movl 4(%eax), %eax
26855 - xorw %ax, %ax
26856 - orl %eax, %edx
26857 + movl 4(%eax), %edx
26858 + movw (%eax), %dx
26859 // Now the address of the handler's in %edx
26860 // We call it now: its "iret" drops us home.
26861 - jmp *%edx
26862 + ljmp $__KERNEL_CS, $1f
26863 +1: jmp *%edx
26864
26865 // Every interrupt can come to us here
26866 // But we must truly tell each apart.
26867 diff -urNp linux-2.6.39.4/drivers/md/dm.c linux-2.6.39.4/drivers/md/dm.c
26868 --- linux-2.6.39.4/drivers/md/dm.c 2011-05-19 00:06:34.000000000 -0400
26869 +++ linux-2.6.39.4/drivers/md/dm.c 2011-08-05 19:44:37.000000000 -0400
26870 @@ -162,9 +162,9 @@ struct mapped_device {
26871 /*
26872 * Event handling.
26873 */
26874 - atomic_t event_nr;
26875 + atomic_unchecked_t event_nr;
26876 wait_queue_head_t eventq;
26877 - atomic_t uevent_seq;
26878 + atomic_unchecked_t uevent_seq;
26879 struct list_head uevent_list;
26880 spinlock_t uevent_lock; /* Protect access to uevent_list */
26881
26882 @@ -1836,8 +1836,8 @@ static struct mapped_device *alloc_dev(i
26883 rwlock_init(&md->map_lock);
26884 atomic_set(&md->holders, 1);
26885 atomic_set(&md->open_count, 0);
26886 - atomic_set(&md->event_nr, 0);
26887 - atomic_set(&md->uevent_seq, 0);
26888 + atomic_set_unchecked(&md->event_nr, 0);
26889 + atomic_set_unchecked(&md->uevent_seq, 0);
26890 INIT_LIST_HEAD(&md->uevent_list);
26891 spin_lock_init(&md->uevent_lock);
26892
26893 @@ -1971,7 +1971,7 @@ static void event_callback(void *context
26894
26895 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26896
26897 - atomic_inc(&md->event_nr);
26898 + atomic_inc_unchecked(&md->event_nr);
26899 wake_up(&md->eventq);
26900 }
26901
26902 @@ -2547,18 +2547,18 @@ int dm_kobject_uevent(struct mapped_devi
26903
26904 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26905 {
26906 - return atomic_add_return(1, &md->uevent_seq);
26907 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26908 }
26909
26910 uint32_t dm_get_event_nr(struct mapped_device *md)
26911 {
26912 - return atomic_read(&md->event_nr);
26913 + return atomic_read_unchecked(&md->event_nr);
26914 }
26915
26916 int dm_wait_event(struct mapped_device *md, int event_nr)
26917 {
26918 return wait_event_interruptible(md->eventq,
26919 - (event_nr != atomic_read(&md->event_nr)));
26920 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26921 }
26922
26923 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26924 diff -urNp linux-2.6.39.4/drivers/md/dm-ioctl.c linux-2.6.39.4/drivers/md/dm-ioctl.c
26925 --- linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-05-19 00:06:34.000000000 -0400
26926 +++ linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-08-05 19:44:37.000000000 -0400
26927 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26928 cmd == DM_LIST_VERSIONS_CMD)
26929 return 0;
26930
26931 - if ((cmd == DM_DEV_CREATE_CMD)) {
26932 + if (cmd == DM_DEV_CREATE_CMD) {
26933 if (!*param->name) {
26934 DMWARN("name not supplied when creating device");
26935 return -EINVAL;
26936 diff -urNp linux-2.6.39.4/drivers/md/dm-raid1.c linux-2.6.39.4/drivers/md/dm-raid1.c
26937 --- linux-2.6.39.4/drivers/md/dm-raid1.c 2011-05-19 00:06:34.000000000 -0400
26938 +++ linux-2.6.39.4/drivers/md/dm-raid1.c 2011-08-05 19:44:37.000000000 -0400
26939 @@ -42,7 +42,7 @@ enum dm_raid1_error {
26940
26941 struct mirror {
26942 struct mirror_set *ms;
26943 - atomic_t error_count;
26944 + atomic_unchecked_t error_count;
26945 unsigned long error_type;
26946 struct dm_dev *dev;
26947 sector_t offset;
26948 @@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(s
26949 struct mirror *m;
26950
26951 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26952 - if (!atomic_read(&m->error_count))
26953 + if (!atomic_read_unchecked(&m->error_count))
26954 return m;
26955
26956 return NULL;
26957 @@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m
26958 * simple way to tell if a device has encountered
26959 * errors.
26960 */
26961 - atomic_inc(&m->error_count);
26962 + atomic_inc_unchecked(&m->error_count);
26963
26964 if (test_and_set_bit(error_type, &m->error_type))
26965 return;
26966 @@ -410,7 +410,7 @@ static struct mirror *choose_mirror(stru
26967 struct mirror *m = get_default_mirror(ms);
26968
26969 do {
26970 - if (likely(!atomic_read(&m->error_count)))
26971 + if (likely(!atomic_read_unchecked(&m->error_count)))
26972 return m;
26973
26974 if (m-- == ms->mirror)
26975 @@ -424,7 +424,7 @@ static int default_ok(struct mirror *m)
26976 {
26977 struct mirror *default_mirror = get_default_mirror(m->ms);
26978
26979 - return !atomic_read(&default_mirror->error_count);
26980 + return !atomic_read_unchecked(&default_mirror->error_count);
26981 }
26982
26983 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26984 @@ -561,7 +561,7 @@ static void do_reads(struct mirror_set *
26985 */
26986 if (likely(region_in_sync(ms, region, 1)))
26987 m = choose_mirror(ms, bio->bi_sector);
26988 - else if (m && atomic_read(&m->error_count))
26989 + else if (m && atomic_read_unchecked(&m->error_count))
26990 m = NULL;
26991
26992 if (likely(m))
26993 @@ -939,7 +939,7 @@ static int get_mirror(struct mirror_set
26994 }
26995
26996 ms->mirror[mirror].ms = ms;
26997 - atomic_set(&(ms->mirror[mirror].error_count), 0);
26998 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26999 ms->mirror[mirror].error_type = 0;
27000 ms->mirror[mirror].offset = offset;
27001
27002 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
27003 */
27004 static char device_status_char(struct mirror *m)
27005 {
27006 - if (!atomic_read(&(m->error_count)))
27007 + if (!atomic_read_unchecked(&(m->error_count)))
27008 return 'A';
27009
27010 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
27011 diff -urNp linux-2.6.39.4/drivers/md/dm-stripe.c linux-2.6.39.4/drivers/md/dm-stripe.c
27012 --- linux-2.6.39.4/drivers/md/dm-stripe.c 2011-05-19 00:06:34.000000000 -0400
27013 +++ linux-2.6.39.4/drivers/md/dm-stripe.c 2011-08-05 19:44:37.000000000 -0400
27014 @@ -20,7 +20,7 @@ struct stripe {
27015 struct dm_dev *dev;
27016 sector_t physical_start;
27017
27018 - atomic_t error_count;
27019 + atomic_unchecked_t error_count;
27020 };
27021
27022 struct stripe_c {
27023 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27024 kfree(sc);
27025 return r;
27026 }
27027 - atomic_set(&(sc->stripe[i].error_count), 0);
27028 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27029 }
27030
27031 ti->private = sc;
27032 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27033 DMEMIT("%d ", sc->stripes);
27034 for (i = 0; i < sc->stripes; i++) {
27035 DMEMIT("%s ", sc->stripe[i].dev->name);
27036 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27037 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27038 'D' : 'A';
27039 }
27040 buffer[i] = '\0';
27041 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27042 */
27043 for (i = 0; i < sc->stripes; i++)
27044 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27045 - atomic_inc(&(sc->stripe[i].error_count));
27046 - if (atomic_read(&(sc->stripe[i].error_count)) <
27047 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
27048 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27049 DM_IO_ERROR_THRESHOLD)
27050 schedule_work(&sc->trigger_event);
27051 }
27052 diff -urNp linux-2.6.39.4/drivers/md/dm-table.c linux-2.6.39.4/drivers/md/dm-table.c
27053 --- linux-2.6.39.4/drivers/md/dm-table.c 2011-06-03 00:04:14.000000000 -0400
27054 +++ linux-2.6.39.4/drivers/md/dm-table.c 2011-08-05 19:44:37.000000000 -0400
27055 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27056 if (!dev_size)
27057 return 0;
27058
27059 - if ((start >= dev_size) || (start + len > dev_size)) {
27060 + if ((start >= dev_size) || (len > dev_size - start)) {
27061 DMWARN("%s: %s too small for target: "
27062 "start=%llu, len=%llu, dev_size=%llu",
27063 dm_device_name(ti->table->md), bdevname(bdev, b),
27064 diff -urNp linux-2.6.39.4/drivers/md/md.c linux-2.6.39.4/drivers/md/md.c
27065 --- linux-2.6.39.4/drivers/md/md.c 2011-07-09 09:18:51.000000000 -0400
27066 +++ linux-2.6.39.4/drivers/md/md.c 2011-08-05 19:44:37.000000000 -0400
27067 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27068 * start build, activate spare
27069 */
27070 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27071 -static atomic_t md_event_count;
27072 +static atomic_unchecked_t md_event_count;
27073 void md_new_event(mddev_t *mddev)
27074 {
27075 - atomic_inc(&md_event_count);
27076 + atomic_inc_unchecked(&md_event_count);
27077 wake_up(&md_event_waiters);
27078 }
27079 EXPORT_SYMBOL_GPL(md_new_event);
27080 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27081 */
27082 static void md_new_event_inintr(mddev_t *mddev)
27083 {
27084 - atomic_inc(&md_event_count);
27085 + atomic_inc_unchecked(&md_event_count);
27086 wake_up(&md_event_waiters);
27087 }
27088
27089 @@ -1454,7 +1454,7 @@ static int super_1_load(mdk_rdev_t *rdev
27090
27091 rdev->preferred_minor = 0xffff;
27092 rdev->data_offset = le64_to_cpu(sb->data_offset);
27093 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27094 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27095
27096 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27097 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27098 @@ -1632,7 +1632,7 @@ static void super_1_sync(mddev_t *mddev,
27099 else
27100 sb->resync_offset = cpu_to_le64(0);
27101
27102 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27103 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27104
27105 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27106 sb->size = cpu_to_le64(mddev->dev_sectors);
27107 @@ -2414,7 +2414,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27108 static ssize_t
27109 errors_show(mdk_rdev_t *rdev, char *page)
27110 {
27111 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27112 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27113 }
27114
27115 static ssize_t
27116 @@ -2423,7 +2423,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27117 char *e;
27118 unsigned long n = simple_strtoul(buf, &e, 10);
27119 if (*buf && (*e == 0 || *e == '\n')) {
27120 - atomic_set(&rdev->corrected_errors, n);
27121 + atomic_set_unchecked(&rdev->corrected_errors, n);
27122 return len;
27123 }
27124 return -EINVAL;
27125 @@ -2779,8 +2779,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27126 rdev->last_read_error.tv_sec = 0;
27127 rdev->last_read_error.tv_nsec = 0;
27128 atomic_set(&rdev->nr_pending, 0);
27129 - atomic_set(&rdev->read_errors, 0);
27130 - atomic_set(&rdev->corrected_errors, 0);
27131 + atomic_set_unchecked(&rdev->read_errors, 0);
27132 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27133
27134 INIT_LIST_HEAD(&rdev->same_set);
27135 init_waitqueue_head(&rdev->blocked_wait);
27136 @@ -6388,7 +6388,7 @@ static int md_seq_show(struct seq_file *
27137
27138 spin_unlock(&pers_lock);
27139 seq_printf(seq, "\n");
27140 - mi->event = atomic_read(&md_event_count);
27141 + mi->event = atomic_read_unchecked(&md_event_count);
27142 return 0;
27143 }
27144 if (v == (void*)2) {
27145 @@ -6477,7 +6477,7 @@ static int md_seq_show(struct seq_file *
27146 chunk_kb ? "KB" : "B");
27147 if (bitmap->file) {
27148 seq_printf(seq, ", file: ");
27149 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27150 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27151 }
27152
27153 seq_printf(seq, "\n");
27154 @@ -6511,7 +6511,7 @@ static int md_seq_open(struct inode *ino
27155 else {
27156 struct seq_file *p = file->private_data;
27157 p->private = mi;
27158 - mi->event = atomic_read(&md_event_count);
27159 + mi->event = atomic_read_unchecked(&md_event_count);
27160 }
27161 return error;
27162 }
27163 @@ -6527,7 +6527,7 @@ static unsigned int mdstat_poll(struct f
27164 /* always allow read */
27165 mask = POLLIN | POLLRDNORM;
27166
27167 - if (mi->event != atomic_read(&md_event_count))
27168 + if (mi->event != atomic_read_unchecked(&md_event_count))
27169 mask |= POLLERR | POLLPRI;
27170 return mask;
27171 }
27172 @@ -6571,7 +6571,7 @@ static int is_mddev_idle(mddev_t *mddev,
27173 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27174 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27175 (int)part_stat_read(&disk->part0, sectors[1]) -
27176 - atomic_read(&disk->sync_io);
27177 + atomic_read_unchecked(&disk->sync_io);
27178 /* sync IO will cause sync_io to increase before the disk_stats
27179 * as sync_io is counted when a request starts, and
27180 * disk_stats is counted when it completes.
27181 diff -urNp linux-2.6.39.4/drivers/md/md.h linux-2.6.39.4/drivers/md/md.h
27182 --- linux-2.6.39.4/drivers/md/md.h 2011-05-19 00:06:34.000000000 -0400
27183 +++ linux-2.6.39.4/drivers/md/md.h 2011-08-05 19:44:37.000000000 -0400
27184 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27185 * only maintained for arrays that
27186 * support hot removal
27187 */
27188 - atomic_t read_errors; /* number of consecutive read errors that
27189 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27190 * we have tried to ignore.
27191 */
27192 struct timespec last_read_error; /* monotonic time since our
27193 * last read error
27194 */
27195 - atomic_t corrected_errors; /* number of corrected read errors,
27196 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27197 * for reporting to userspace and storing
27198 * in superblock.
27199 */
27200 @@ -342,7 +342,7 @@ static inline void rdev_dec_pending(mdk_
27201
27202 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27203 {
27204 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27205 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27206 }
27207
27208 struct mdk_personality
27209 diff -urNp linux-2.6.39.4/drivers/md/raid10.c linux-2.6.39.4/drivers/md/raid10.c
27210 --- linux-2.6.39.4/drivers/md/raid10.c 2011-05-19 00:06:34.000000000 -0400
27211 +++ linux-2.6.39.4/drivers/md/raid10.c 2011-08-05 19:44:37.000000000 -0400
27212 @@ -1209,7 +1209,7 @@ static void end_sync_read(struct bio *bi
27213 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27214 set_bit(R10BIO_Uptodate, &r10_bio->state);
27215 else {
27216 - atomic_add(r10_bio->sectors,
27217 + atomic_add_unchecked(r10_bio->sectors,
27218 &conf->mirrors[d].rdev->corrected_errors);
27219 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27220 md_error(r10_bio->mddev,
27221 @@ -1417,7 +1417,7 @@ static void check_decay_read_errors(mdde
27222 {
27223 struct timespec cur_time_mon;
27224 unsigned long hours_since_last;
27225 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27226 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27227
27228 ktime_get_ts(&cur_time_mon);
27229
27230 @@ -1439,9 +1439,9 @@ static void check_decay_read_errors(mdde
27231 * overflowing the shift of read_errors by hours_since_last.
27232 */
27233 if (hours_since_last >= 8 * sizeof(read_errors))
27234 - atomic_set(&rdev->read_errors, 0);
27235 + atomic_set_unchecked(&rdev->read_errors, 0);
27236 else
27237 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27238 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27239 }
27240
27241 /*
27242 @@ -1476,8 +1476,8 @@ static void fix_read_error(conf_t *conf,
27243 }
27244
27245 check_decay_read_errors(mddev, rdev);
27246 - atomic_inc(&rdev->read_errors);
27247 - cur_read_error_count = atomic_read(&rdev->read_errors);
27248 + atomic_inc_unchecked(&rdev->read_errors);
27249 + cur_read_error_count = atomic_read_unchecked(&rdev->read_errors);
27250 if (cur_read_error_count > max_read_errors) {
27251 rcu_read_unlock();
27252 printk(KERN_NOTICE
27253 @@ -1550,7 +1550,7 @@ static void fix_read_error(conf_t *conf,
27254 test_bit(In_sync, &rdev->flags)) {
27255 atomic_inc(&rdev->nr_pending);
27256 rcu_read_unlock();
27257 - atomic_add(s, &rdev->corrected_errors);
27258 + atomic_add_unchecked(s, &rdev->corrected_errors);
27259 if (sync_page_io(rdev,
27260 r10_bio->devs[sl].addr +
27261 sect,
27262 diff -urNp linux-2.6.39.4/drivers/md/raid1.c linux-2.6.39.4/drivers/md/raid1.c
27263 --- linux-2.6.39.4/drivers/md/raid1.c 2011-05-19 00:06:34.000000000 -0400
27264 +++ linux-2.6.39.4/drivers/md/raid1.c 2011-08-05 19:44:37.000000000 -0400
27265 @@ -1342,7 +1342,7 @@ static void sync_request_write(mddev_t *
27266 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
27267 continue;
27268 rdev = conf->mirrors[d].rdev;
27269 - atomic_add(s, &rdev->corrected_errors);
27270 + atomic_add_unchecked(s, &rdev->corrected_errors);
27271 if (sync_page_io(rdev,
27272 sect,
27273 s<<9,
27274 @@ -1488,7 +1488,7 @@ static void fix_read_error(conf_t *conf,
27275 /* Well, this device is dead */
27276 md_error(mddev, rdev);
27277 else {
27278 - atomic_add(s, &rdev->corrected_errors);
27279 + atomic_add_unchecked(s, &rdev->corrected_errors);
27280 printk(KERN_INFO
27281 "md/raid1:%s: read error corrected "
27282 "(%d sectors at %llu on %s)\n",
27283 diff -urNp linux-2.6.39.4/drivers/md/raid5.c linux-2.6.39.4/drivers/md/raid5.c
27284 --- linux-2.6.39.4/drivers/md/raid5.c 2011-06-25 12:55:22.000000000 -0400
27285 +++ linux-2.6.39.4/drivers/md/raid5.c 2011-08-05 19:44:37.000000000 -0400
27286 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27287 bi->bi_next = NULL;
27288 if ((rw & WRITE) &&
27289 test_bit(R5_ReWrite, &sh->dev[i].flags))
27290 - atomic_add(STRIPE_SECTORS,
27291 + atomic_add_unchecked(STRIPE_SECTORS,
27292 &rdev->corrected_errors);
27293 generic_make_request(bi);
27294 } else {
27295 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27296 clear_bit(R5_ReadError, &sh->dev[i].flags);
27297 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27298 }
27299 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27300 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27301 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27302 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27303 } else {
27304 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27305 int retry = 0;
27306 rdev = conf->disks[i].rdev;
27307
27308 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27309 - atomic_inc(&rdev->read_errors);
27310 + atomic_inc_unchecked(&rdev->read_errors);
27311 if (conf->mddev->degraded >= conf->max_degraded)
27312 printk_rl(KERN_WARNING
27313 "md/raid:%s: read error not correctable "
27314 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27315 (unsigned long long)(sh->sector
27316 + rdev->data_offset),
27317 bdn);
27318 - else if (atomic_read(&rdev->read_errors)
27319 + else if (atomic_read_unchecked(&rdev->read_errors)
27320 > conf->max_nr_stripes)
27321 printk(KERN_WARNING
27322 "md/raid:%s: Too many read errors, failing device %s.\n",
27323 @@ -1947,6 +1947,7 @@ static sector_t compute_blocknr(struct s
27324 sector_t r_sector;
27325 struct stripe_head sh2;
27326
27327 + pax_track_stack();
27328
27329 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27330 stripe = new_sector;
27331 diff -urNp linux-2.6.39.4/drivers/media/common/saa7146_hlp.c linux-2.6.39.4/drivers/media/common/saa7146_hlp.c
27332 --- linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-05-19 00:06:34.000000000 -0400
27333 +++ linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-08-05 19:44:37.000000000 -0400
27334 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27335
27336 int x[32], y[32], w[32], h[32];
27337
27338 + pax_track_stack();
27339 +
27340 /* clear out memory */
27341 memset(&line_list[0], 0x00, sizeof(u32)*32);
27342 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27343 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27344 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-19 00:06:34.000000000 -0400
27345 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-05 19:44:37.000000000 -0400
27346 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27347 u8 buf[HOST_LINK_BUF_SIZE];
27348 int i;
27349
27350 + pax_track_stack();
27351 +
27352 dprintk("%s\n", __func__);
27353
27354 /* check if we have space for a link buf in the rx_buffer */
27355 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27356 unsigned long timeout;
27357 int written;
27358
27359 + pax_track_stack();
27360 +
27361 dprintk("%s\n", __func__);
27362
27363 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27364 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h
27365 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-05-19 00:06:34.000000000 -0400
27366 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:34:06.000000000 -0400
27367 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
27368 union {
27369 dmx_ts_cb ts;
27370 dmx_section_cb sec;
27371 - } cb;
27372 + } __no_const cb;
27373
27374 struct dvb_demux *demux;
27375 void *priv;
27376 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c
27377 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-19 00:06:34.000000000 -0400
27378 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:34:06.000000000 -0400
27379 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27380 const struct dvb_device *template, void *priv, int type)
27381 {
27382 struct dvb_device *dvbdev;
27383 - struct file_operations *dvbdevfops;
27384 + file_operations_no_const *dvbdevfops;
27385 struct device *clsdev;
27386 int minor;
27387 int id;
27388 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c
27389 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-05-19 00:06:34.000000000 -0400
27390 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:34:06.000000000 -0400
27391 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27392 struct dib0700_adapter_state {
27393 int (*set_param_save) (struct dvb_frontend *,
27394 struct dvb_frontend_parameters *);
27395 -};
27396 +} __no_const;
27397
27398 static int dib7070_set_param_override(struct dvb_frontend *fe,
27399 struct dvb_frontend_parameters *fep)
27400 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27401 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-19 00:06:34.000000000 -0400
27402 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-05 19:44:37.000000000 -0400
27403 @@ -391,6 +391,8 @@ int dib0700_download_firmware(struct usb
27404
27405 u8 buf[260];
27406
27407 + pax_track_stack();
27408 +
27409 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27410 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27411 hx.addr, hx.len, hx.chk);
27412 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c
27413 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-05-19 00:06:34.000000000 -0400
27414 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-05 20:34:06.000000000 -0400
27415 @@ -95,7 +95,7 @@ struct su3000_state {
27416
27417 struct s6x0_state {
27418 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27419 -};
27420 +} __no_const;
27421
27422 /* debug */
27423 static int dvb_usb_dw2102_debug;
27424 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c
27425 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-19 00:06:34.000000000 -0400
27426 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-05 19:44:37.000000000 -0400
27427 @@ -663,6 +663,7 @@ static int lme2510_download_firmware(str
27428 packet_size = 0x31;
27429 len_in = 1;
27430
27431 + pax_track_stack();
27432
27433 info("FRM Starting Firmware Download");
27434
27435 @@ -715,6 +716,8 @@ static void lme_coldreset(struct usb_dev
27436 int ret = 0, len_in;
27437 u8 data[512] = {0};
27438
27439 + pax_track_stack();
27440 +
27441 data[0] = 0x0a;
27442 len_in = 1;
27443 info("FRM Firmware Cold Reset");
27444 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h
27445 --- linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-05-19 00:06:34.000000000 -0400
27446 +++ linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:34:06.000000000 -0400
27447 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
27448 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
27449 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27450 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27451 -};
27452 +} __no_const;
27453
27454 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27455 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27456 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c
27457 --- linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-05-19 00:06:34.000000000 -0400
27458 +++ linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-05 19:44:37.000000000 -0400
27459 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27460 int ret = -1;
27461 int sync;
27462
27463 + pax_track_stack();
27464 +
27465 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27466
27467 fcp = 3000;
27468 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c
27469 --- linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-05-19 00:06:34.000000000 -0400
27470 +++ linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-08-05 19:44:37.000000000 -0400
27471 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27472 u8 tudata[585];
27473 int i;
27474
27475 + pax_track_stack();
27476 +
27477 dprintk("Firmware is %zd bytes\n",fw->size);
27478
27479 /* Get eprom data */
27480 diff -urNp linux-2.6.39.4/drivers/media/radio/radio-cadet.c linux-2.6.39.4/drivers/media/radio/radio-cadet.c
27481 --- linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-05-19 00:06:34.000000000 -0400
27482 +++ linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-08-05 19:44:37.000000000 -0400
27483 @@ -349,7 +349,7 @@ static ssize_t cadet_read(struct file *f
27484 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
27485 mutex_unlock(&dev->lock);
27486
27487 - if (copy_to_user(data, readbuf, i))
27488 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
27489 return -EFAULT;
27490 return i;
27491 }
27492 diff -urNp linux-2.6.39.4/drivers/media/rc/rc-main.c linux-2.6.39.4/drivers/media/rc/rc-main.c
27493 --- linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-05-19 00:06:34.000000000 -0400
27494 +++ linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-08-05 19:44:37.000000000 -0400
27495 @@ -996,7 +996,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
27496
27497 int rc_register_device(struct rc_dev *dev)
27498 {
27499 - static atomic_t devno = ATOMIC_INIT(0);
27500 + static atomic_unchecked_t devno = ATOMIC_INIT(0);
27501 struct rc_map *rc_map;
27502 const char *path;
27503 int rc;
27504 @@ -1019,7 +1019,7 @@ int rc_register_device(struct rc_dev *de
27505 if (dev->close)
27506 dev->input_dev->close = ir_close;
27507
27508 - dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
27509 + dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
27510 dev_set_name(&dev->dev, "rc%ld", dev->devno);
27511 dev_set_drvdata(&dev->dev, dev);
27512 rc = device_add(&dev->dev);
27513 diff -urNp linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c
27514 --- linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-05-19 00:06:34.000000000 -0400
27515 +++ linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-08-05 19:44:37.000000000 -0400
27516 @@ -61,7 +61,7 @@ static struct pci_device_id cx18_pci_tbl
27517
27518 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
27519
27520 -static atomic_t cx18_instance = ATOMIC_INIT(0);
27521 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
27522
27523 /* Parameter declarations */
27524 static int cardtype[CX18_MAX_CARDS];
27525 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27526 struct i2c_client c;
27527 u8 eedata[256];
27528
27529 + pax_track_stack();
27530 +
27531 memset(&c, 0, sizeof(c));
27532 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27533 c.adapter = &cx->i2c_adap[0];
27534 @@ -892,7 +894,7 @@ static int __devinit cx18_probe(struct p
27535 struct cx18 *cx;
27536
27537 /* FIXME - module parameter arrays constrain max instances */
27538 - i = atomic_inc_return(&cx18_instance) - 1;
27539 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
27540 if (i >= CX18_MAX_CARDS) {
27541 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
27542 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
27543 diff -urNp linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c
27544 --- linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-05-19 00:06:34.000000000 -0400
27545 +++ linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-05 19:44:37.000000000 -0400
27546 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27547 bool handle = false;
27548 struct ir_raw_event ir_core_event[64];
27549
27550 + pax_track_stack();
27551 +
27552 do {
27553 num = 0;
27554 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27555 diff -urNp linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c
27556 --- linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-05-19 00:06:34.000000000 -0400
27557 +++ linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-08-05 19:44:37.000000000 -0400
27558 @@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl
27559 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
27560
27561 /* ivtv instance counter */
27562 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
27563 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
27564
27565 /* Parameter declarations */
27566 static int cardtype[IVTV_MAX_CARDS];
27567 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.c linux-2.6.39.4/drivers/media/video/omap24xxcam.c
27568 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-05-19 00:06:34.000000000 -0400
27569 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-08-05 19:44:37.000000000 -0400
27570 @@ -403,7 +403,7 @@ static void omap24xxcam_vbq_complete(str
27571 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
27572
27573 do_gettimeofday(&vb->ts);
27574 - vb->field_count = atomic_add_return(2, &fh->field_count);
27575 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
27576 if (csr & csr_error) {
27577 vb->state = VIDEOBUF_ERROR;
27578 if (!atomic_read(&fh->cam->in_reset)) {
27579 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.h linux-2.6.39.4/drivers/media/video/omap24xxcam.h
27580 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-05-19 00:06:34.000000000 -0400
27581 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-08-05 19:44:37.000000000 -0400
27582 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
27583 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
27584 struct videobuf_queue vbq;
27585 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
27586 - atomic_t field_count; /* field counter for videobuf_buffer */
27587 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
27588 /* accessing cam here doesn't need serialisation: it's constant */
27589 struct omap24xxcam_device *cam;
27590 };
27591 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27592 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-19 00:06:34.000000000 -0400
27593 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-05 19:44:37.000000000 -0400
27594 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27595 u8 *eeprom;
27596 struct tveeprom tvdata;
27597
27598 + pax_track_stack();
27599 +
27600 memset(&tvdata,0,sizeof(tvdata));
27601
27602 eeprom = pvr2_eeprom_fetch(hdw);
27603 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
27604 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-05-19 00:06:34.000000000 -0400
27605 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-05 20:34:06.000000000 -0400
27606 @@ -196,7 +196,7 @@ struct pvr2_hdw {
27607
27608 /* I2C stuff */
27609 struct i2c_adapter i2c_adap;
27610 - struct i2c_algorithm i2c_algo;
27611 + i2c_algorithm_no_const i2c_algo;
27612 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
27613 int i2c_cx25840_hack_state;
27614 int i2c_linked;
27615 diff -urNp linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c
27616 --- linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-05-19 00:06:34.000000000 -0400
27617 +++ linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-05 19:44:37.000000000 -0400
27618 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27619 unsigned char localPAT[256];
27620 unsigned char localPMT[256];
27621
27622 + pax_track_stack();
27623 +
27624 /* Set video format - must be done first as it resets other settings */
27625 set_reg8(client, 0x41, h->video_format);
27626
27627 diff -urNp linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c
27628 --- linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-19 00:06:34.000000000 -0400
27629 +++ linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-05 19:44:37.000000000 -0400
27630 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27631 u8 tmp[512];
27632 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27633
27634 + pax_track_stack();
27635 +
27636 /* While any outstand message on the bus exists... */
27637 do {
27638
27639 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27640 u8 tmp[512];
27641 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27642
27643 + pax_track_stack();
27644 +
27645 while (loop) {
27646
27647 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27648 diff -urNp linux-2.6.39.4/drivers/media/video/timblogiw.c linux-2.6.39.4/drivers/media/video/timblogiw.c
27649 --- linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-05-19 00:06:34.000000000 -0400
27650 +++ linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-08-05 20:34:06.000000000 -0400
27651 @@ -746,7 +746,7 @@ static int timblogiw_mmap(struct file *f
27652
27653 /* Platform device functions */
27654
27655 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27656 +static __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27657 .vidioc_querycap = timblogiw_querycap,
27658 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27659 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27660 @@ -768,7 +768,7 @@ static __devinitconst struct v4l2_ioctl_
27661 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
27662 };
27663
27664 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
27665 +static __devinitdata struct v4l2_file_operations timblogiw_fops = {
27666 .owner = THIS_MODULE,
27667 .open = timblogiw_open,
27668 .release = timblogiw_close,
27669 diff -urNp linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c
27670 --- linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-05-19 00:06:34.000000000 -0400
27671 +++ linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-05 19:44:37.000000000 -0400
27672 @@ -799,6 +799,8 @@ static enum parse_state usbvision_parse_
27673 unsigned char rv, gv, bv;
27674 static unsigned char *Y, *U, *V;
27675
27676 + pax_track_stack();
27677 +
27678 frame = usbvision->cur_frame;
27679 image_size = frame->frmwidth * frame->frmheight;
27680 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27681 diff -urNp linux-2.6.39.4/drivers/media/video/v4l2-device.c linux-2.6.39.4/drivers/media/video/v4l2-device.c
27682 --- linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-05-19 00:06:34.000000000 -0400
27683 +++ linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-08-05 19:44:37.000000000 -0400
27684 @@ -71,9 +71,9 @@ int v4l2_device_put(struct v4l2_device *
27685 EXPORT_SYMBOL_GPL(v4l2_device_put);
27686
27687 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
27688 - atomic_t *instance)
27689 + atomic_unchecked_t *instance)
27690 {
27691 - int num = atomic_inc_return(instance) - 1;
27692 + int num = atomic_inc_return_unchecked(instance) - 1;
27693 int len = strlen(basename);
27694
27695 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
27696 diff -urNp linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c
27697 --- linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-05-19 00:06:34.000000000 -0400
27698 +++ linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-08-05 19:44:37.000000000 -0400
27699 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27700 {
27701 struct videobuf_queue q;
27702
27703 + pax_track_stack();
27704 +
27705 /* Required to make generic handler to call __videobuf_alloc */
27706 q.int_ops = &sg_ops;
27707
27708 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptbase.c linux-2.6.39.4/drivers/message/fusion/mptbase.c
27709 --- linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-05-19 00:06:34.000000000 -0400
27710 +++ linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-08-05 20:34:06.000000000 -0400
27711 @@ -6683,8 +6683,13 @@ static int mpt_iocinfo_proc_show(struct
27712 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27713 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27714
27715 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27716 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27717 +#else
27718 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27719 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27720 +#endif
27721 +
27722 /*
27723 * Rounding UP to nearest 4-kB boundary here...
27724 */
27725 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptsas.c linux-2.6.39.4/drivers/message/fusion/mptsas.c
27726 --- linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-05-19 00:06:34.000000000 -0400
27727 +++ linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-08-05 19:44:37.000000000 -0400
27728 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27729 return 0;
27730 }
27731
27732 +static inline void
27733 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27734 +{
27735 + if (phy_info->port_details) {
27736 + phy_info->port_details->rphy = rphy;
27737 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27738 + ioc->name, rphy));
27739 + }
27740 +
27741 + if (rphy) {
27742 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27743 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27744 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27745 + ioc->name, rphy, rphy->dev.release));
27746 + }
27747 +}
27748 +
27749 /* no mutex */
27750 static void
27751 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27752 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27753 return NULL;
27754 }
27755
27756 -static inline void
27757 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27758 -{
27759 - if (phy_info->port_details) {
27760 - phy_info->port_details->rphy = rphy;
27761 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27762 - ioc->name, rphy));
27763 - }
27764 -
27765 - if (rphy) {
27766 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27767 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27768 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27769 - ioc->name, rphy, rphy->dev.release));
27770 - }
27771 -}
27772 -
27773 static inline struct sas_port *
27774 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27775 {
27776 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptscsih.c linux-2.6.39.4/drivers/message/fusion/mptscsih.c
27777 --- linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-05-19 00:06:34.000000000 -0400
27778 +++ linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-08-05 19:44:37.000000000 -0400
27779 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27780
27781 h = shost_priv(SChost);
27782
27783 - if (h) {
27784 - if (h->info_kbuf == NULL)
27785 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27786 - return h->info_kbuf;
27787 - h->info_kbuf[0] = '\0';
27788 + if (!h)
27789 + return NULL;
27790
27791 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27792 - h->info_kbuf[size-1] = '\0';
27793 - }
27794 + if (h->info_kbuf == NULL)
27795 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27796 + return h->info_kbuf;
27797 + h->info_kbuf[0] = '\0';
27798 +
27799 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27800 + h->info_kbuf[size-1] = '\0';
27801
27802 return h->info_kbuf;
27803 }
27804 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_config.c linux-2.6.39.4/drivers/message/i2o/i2o_config.c
27805 --- linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-05-19 00:06:34.000000000 -0400
27806 +++ linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-08-05 19:44:37.000000000 -0400
27807 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27808 struct i2o_message *msg;
27809 unsigned int iop;
27810
27811 + pax_track_stack();
27812 +
27813 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27814 return -EFAULT;
27815
27816 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_proc.c linux-2.6.39.4/drivers/message/i2o/i2o_proc.c
27817 --- linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-05-19 00:06:34.000000000 -0400
27818 +++ linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-08-05 19:44:37.000000000 -0400
27819 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27820 "Array Controller Device"
27821 };
27822
27823 -static char *chtostr(u8 * chars, int n)
27824 -{
27825 - char tmp[256];
27826 - tmp[0] = 0;
27827 - return strncat(tmp, (char *)chars, n);
27828 -}
27829 -
27830 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27831 char *group)
27832 {
27833 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27834
27835 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27836 seq_printf(seq, "%-#8x", ddm_table.module_id);
27837 - seq_printf(seq, "%-29s",
27838 - chtostr(ddm_table.module_name_version, 28));
27839 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27840 seq_printf(seq, "%9d ", ddm_table.data_size);
27841 seq_printf(seq, "%8d", ddm_table.code_size);
27842
27843 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27844
27845 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27846 seq_printf(seq, "%-#8x", dst->module_id);
27847 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27848 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27849 + seq_printf(seq, "%-.28s", dst->module_name_version);
27850 + seq_printf(seq, "%-.8s", dst->date);
27851 seq_printf(seq, "%8d ", dst->module_size);
27852 seq_printf(seq, "%8d ", dst->mpb_size);
27853 seq_printf(seq, "0x%04x", dst->module_flags);
27854 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27855 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27856 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27857 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27858 - seq_printf(seq, "Vendor info : %s\n",
27859 - chtostr((u8 *) (work32 + 2), 16));
27860 - seq_printf(seq, "Product info : %s\n",
27861 - chtostr((u8 *) (work32 + 6), 16));
27862 - seq_printf(seq, "Description : %s\n",
27863 - chtostr((u8 *) (work32 + 10), 16));
27864 - seq_printf(seq, "Product rev. : %s\n",
27865 - chtostr((u8 *) (work32 + 14), 8));
27866 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27867 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27868 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27869 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27870
27871 seq_printf(seq, "Serial number : ");
27872 print_serial_number(seq, (u8 *) (work32 + 16),
27873 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27874 }
27875
27876 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27877 - seq_printf(seq, "Module name : %s\n",
27878 - chtostr(result.module_name, 24));
27879 - seq_printf(seq, "Module revision : %s\n",
27880 - chtostr(result.module_rev, 8));
27881 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27882 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27883
27884 seq_printf(seq, "Serial number : ");
27885 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27886 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27887 return 0;
27888 }
27889
27890 - seq_printf(seq, "Device name : %s\n",
27891 - chtostr(result.device_name, 64));
27892 - seq_printf(seq, "Service name : %s\n",
27893 - chtostr(result.service_name, 64));
27894 - seq_printf(seq, "Physical name : %s\n",
27895 - chtostr(result.physical_location, 64));
27896 - seq_printf(seq, "Instance number : %s\n",
27897 - chtostr(result.instance_number, 4));
27898 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27899 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27900 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27901 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27902
27903 return 0;
27904 }
27905 diff -urNp linux-2.6.39.4/drivers/message/i2o/iop.c linux-2.6.39.4/drivers/message/i2o/iop.c
27906 --- linux-2.6.39.4/drivers/message/i2o/iop.c 2011-05-19 00:06:34.000000000 -0400
27907 +++ linux-2.6.39.4/drivers/message/i2o/iop.c 2011-08-05 19:44:37.000000000 -0400
27908 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27909
27910 spin_lock_irqsave(&c->context_list_lock, flags);
27911
27912 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27913 - atomic_inc(&c->context_list_counter);
27914 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27915 + atomic_inc_unchecked(&c->context_list_counter);
27916
27917 - entry->context = atomic_read(&c->context_list_counter);
27918 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27919
27920 list_add(&entry->list, &c->context_list);
27921
27922 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27923
27924 #if BITS_PER_LONG == 64
27925 spin_lock_init(&c->context_list_lock);
27926 - atomic_set(&c->context_list_counter, 0);
27927 + atomic_set_unchecked(&c->context_list_counter, 0);
27928 INIT_LIST_HEAD(&c->context_list);
27929 #endif
27930
27931 diff -urNp linux-2.6.39.4/drivers/mfd/abx500-core.c linux-2.6.39.4/drivers/mfd/abx500-core.c
27932 --- linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-05-19 00:06:34.000000000 -0400
27933 +++ linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-08-05 20:34:06.000000000 -0400
27934 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27935
27936 struct abx500_device_entry {
27937 struct list_head list;
27938 - struct abx500_ops ops;
27939 + abx500_ops_no_const ops;
27940 struct device *dev;
27941 };
27942
27943 diff -urNp linux-2.6.39.4/drivers/mfd/janz-cmodio.c linux-2.6.39.4/drivers/mfd/janz-cmodio.c
27944 --- linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-05-19 00:06:34.000000000 -0400
27945 +++ linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-08-05 19:44:37.000000000 -0400
27946 @@ -13,6 +13,7 @@
27947
27948 #include <linux/kernel.h>
27949 #include <linux/module.h>
27950 +#include <linux/slab.h>
27951 #include <linux/init.h>
27952 #include <linux/pci.h>
27953 #include <linux/interrupt.h>
27954 diff -urNp linux-2.6.39.4/drivers/mfd/wm8350-i2c.c linux-2.6.39.4/drivers/mfd/wm8350-i2c.c
27955 --- linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-05-19 00:06:34.000000000 -0400
27956 +++ linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-08-05 19:44:37.000000000 -0400
27957 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27958 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27959 int ret;
27960
27961 + pax_track_stack();
27962 +
27963 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27964 return -EINVAL;
27965
27966 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c
27967 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-19 00:06:34.000000000 -0400
27968 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-05 19:44:37.000000000 -0400
27969 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27970 * the lid is closed. This leads to interrupts as soon as a little move
27971 * is done.
27972 */
27973 - atomic_inc(&lis3_dev.count);
27974 + atomic_inc_unchecked(&lis3_dev.count);
27975
27976 wake_up_interruptible(&lis3_dev.misc_wait);
27977 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27978 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27979 if (lis3_dev.pm_dev)
27980 pm_runtime_get_sync(lis3_dev.pm_dev);
27981
27982 - atomic_set(&lis3_dev.count, 0);
27983 + atomic_set_unchecked(&lis3_dev.count, 0);
27984 return 0;
27985 }
27986
27987 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27988 add_wait_queue(&lis3_dev.misc_wait, &wait);
27989 while (true) {
27990 set_current_state(TASK_INTERRUPTIBLE);
27991 - data = atomic_xchg(&lis3_dev.count, 0);
27992 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27993 if (data)
27994 break;
27995
27996 @@ -583,7 +583,7 @@ out:
27997 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27998 {
27999 poll_wait(file, &lis3_dev.misc_wait, wait);
28000 - if (atomic_read(&lis3_dev.count))
28001 + if (atomic_read_unchecked(&lis3_dev.count))
28002 return POLLIN | POLLRDNORM;
28003 return 0;
28004 }
28005 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h
28006 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-19 00:06:34.000000000 -0400
28007 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-05 19:44:37.000000000 -0400
28008 @@ -265,7 +265,7 @@ struct lis3lv02d {
28009 struct input_polled_dev *idev; /* input device */
28010 struct platform_device *pdev; /* platform device */
28011 struct regulator_bulk_data regulators[2];
28012 - atomic_t count; /* interrupt count after last read */
28013 + atomic_unchecked_t count; /* interrupt count after last read */
28014 union axis_conversion ac; /* hw -> logical axis */
28015 int mapped_btns[3];
28016
28017 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c
28018 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-05-19 00:06:34.000000000 -0400
28019 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-05 19:44:37.000000000 -0400
28020 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
28021 unsigned long nsec;
28022
28023 nsec = CLKS2NSEC(clks);
28024 - atomic_long_inc(&mcs_op_statistics[op].count);
28025 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
28026 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28027 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28028 if (mcs_op_statistics[op].max < nsec)
28029 mcs_op_statistics[op].max = nsec;
28030 }
28031 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c
28032 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-05-19 00:06:34.000000000 -0400
28033 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-05 19:44:37.000000000 -0400
28034 @@ -32,9 +32,9 @@
28035
28036 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28037
28038 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28039 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28040 {
28041 - unsigned long val = atomic_long_read(v);
28042 + unsigned long val = atomic_long_read_unchecked(v);
28043
28044 seq_printf(s, "%16lu %s\n", val, id);
28045 }
28046 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28047
28048 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28049 for (op = 0; op < mcsop_last; op++) {
28050 - count = atomic_long_read(&mcs_op_statistics[op].count);
28051 - total = atomic_long_read(&mcs_op_statistics[op].total);
28052 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28053 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28054 max = mcs_op_statistics[op].max;
28055 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28056 count ? total / count : 0, max);
28057 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h
28058 --- linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-05-19 00:06:34.000000000 -0400
28059 +++ linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-08-05 19:44:37.000000000 -0400
28060 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28061 * GRU statistics.
28062 */
28063 struct gru_stats_s {
28064 - atomic_long_t vdata_alloc;
28065 - atomic_long_t vdata_free;
28066 - atomic_long_t gts_alloc;
28067 - atomic_long_t gts_free;
28068 - atomic_long_t gms_alloc;
28069 - atomic_long_t gms_free;
28070 - atomic_long_t gts_double_allocate;
28071 - atomic_long_t assign_context;
28072 - atomic_long_t assign_context_failed;
28073 - atomic_long_t free_context;
28074 - atomic_long_t load_user_context;
28075 - atomic_long_t load_kernel_context;
28076 - atomic_long_t lock_kernel_context;
28077 - atomic_long_t unlock_kernel_context;
28078 - atomic_long_t steal_user_context;
28079 - atomic_long_t steal_kernel_context;
28080 - atomic_long_t steal_context_failed;
28081 - atomic_long_t nopfn;
28082 - atomic_long_t asid_new;
28083 - atomic_long_t asid_next;
28084 - atomic_long_t asid_wrap;
28085 - atomic_long_t asid_reuse;
28086 - atomic_long_t intr;
28087 - atomic_long_t intr_cbr;
28088 - atomic_long_t intr_tfh;
28089 - atomic_long_t intr_spurious;
28090 - atomic_long_t intr_mm_lock_failed;
28091 - atomic_long_t call_os;
28092 - atomic_long_t call_os_wait_queue;
28093 - atomic_long_t user_flush_tlb;
28094 - atomic_long_t user_unload_context;
28095 - atomic_long_t user_exception;
28096 - atomic_long_t set_context_option;
28097 - atomic_long_t check_context_retarget_intr;
28098 - atomic_long_t check_context_unload;
28099 - atomic_long_t tlb_dropin;
28100 - atomic_long_t tlb_preload_page;
28101 - atomic_long_t tlb_dropin_fail_no_asid;
28102 - atomic_long_t tlb_dropin_fail_upm;
28103 - atomic_long_t tlb_dropin_fail_invalid;
28104 - atomic_long_t tlb_dropin_fail_range_active;
28105 - atomic_long_t tlb_dropin_fail_idle;
28106 - atomic_long_t tlb_dropin_fail_fmm;
28107 - atomic_long_t tlb_dropin_fail_no_exception;
28108 - atomic_long_t tfh_stale_on_fault;
28109 - atomic_long_t mmu_invalidate_range;
28110 - atomic_long_t mmu_invalidate_page;
28111 - atomic_long_t flush_tlb;
28112 - atomic_long_t flush_tlb_gru;
28113 - atomic_long_t flush_tlb_gru_tgh;
28114 - atomic_long_t flush_tlb_gru_zero_asid;
28115 -
28116 - atomic_long_t copy_gpa;
28117 - atomic_long_t read_gpa;
28118 -
28119 - atomic_long_t mesq_receive;
28120 - atomic_long_t mesq_receive_none;
28121 - atomic_long_t mesq_send;
28122 - atomic_long_t mesq_send_failed;
28123 - atomic_long_t mesq_noop;
28124 - atomic_long_t mesq_send_unexpected_error;
28125 - atomic_long_t mesq_send_lb_overflow;
28126 - atomic_long_t mesq_send_qlimit_reached;
28127 - atomic_long_t mesq_send_amo_nacked;
28128 - atomic_long_t mesq_send_put_nacked;
28129 - atomic_long_t mesq_page_overflow;
28130 - atomic_long_t mesq_qf_locked;
28131 - atomic_long_t mesq_qf_noop_not_full;
28132 - atomic_long_t mesq_qf_switch_head_failed;
28133 - atomic_long_t mesq_qf_unexpected_error;
28134 - atomic_long_t mesq_noop_unexpected_error;
28135 - atomic_long_t mesq_noop_lb_overflow;
28136 - atomic_long_t mesq_noop_qlimit_reached;
28137 - atomic_long_t mesq_noop_amo_nacked;
28138 - atomic_long_t mesq_noop_put_nacked;
28139 - atomic_long_t mesq_noop_page_overflow;
28140 + atomic_long_unchecked_t vdata_alloc;
28141 + atomic_long_unchecked_t vdata_free;
28142 + atomic_long_unchecked_t gts_alloc;
28143 + atomic_long_unchecked_t gts_free;
28144 + atomic_long_unchecked_t gms_alloc;
28145 + atomic_long_unchecked_t gms_free;
28146 + atomic_long_unchecked_t gts_double_allocate;
28147 + atomic_long_unchecked_t assign_context;
28148 + atomic_long_unchecked_t assign_context_failed;
28149 + atomic_long_unchecked_t free_context;
28150 + atomic_long_unchecked_t load_user_context;
28151 + atomic_long_unchecked_t load_kernel_context;
28152 + atomic_long_unchecked_t lock_kernel_context;
28153 + atomic_long_unchecked_t unlock_kernel_context;
28154 + atomic_long_unchecked_t steal_user_context;
28155 + atomic_long_unchecked_t steal_kernel_context;
28156 + atomic_long_unchecked_t steal_context_failed;
28157 + atomic_long_unchecked_t nopfn;
28158 + atomic_long_unchecked_t asid_new;
28159 + atomic_long_unchecked_t asid_next;
28160 + atomic_long_unchecked_t asid_wrap;
28161 + atomic_long_unchecked_t asid_reuse;
28162 + atomic_long_unchecked_t intr;
28163 + atomic_long_unchecked_t intr_cbr;
28164 + atomic_long_unchecked_t intr_tfh;
28165 + atomic_long_unchecked_t intr_spurious;
28166 + atomic_long_unchecked_t intr_mm_lock_failed;
28167 + atomic_long_unchecked_t call_os;
28168 + atomic_long_unchecked_t call_os_wait_queue;
28169 + atomic_long_unchecked_t user_flush_tlb;
28170 + atomic_long_unchecked_t user_unload_context;
28171 + atomic_long_unchecked_t user_exception;
28172 + atomic_long_unchecked_t set_context_option;
28173 + atomic_long_unchecked_t check_context_retarget_intr;
28174 + atomic_long_unchecked_t check_context_unload;
28175 + atomic_long_unchecked_t tlb_dropin;
28176 + atomic_long_unchecked_t tlb_preload_page;
28177 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28178 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28179 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28180 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28181 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28182 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28183 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28184 + atomic_long_unchecked_t tfh_stale_on_fault;
28185 + atomic_long_unchecked_t mmu_invalidate_range;
28186 + atomic_long_unchecked_t mmu_invalidate_page;
28187 + atomic_long_unchecked_t flush_tlb;
28188 + atomic_long_unchecked_t flush_tlb_gru;
28189 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28190 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28191 +
28192 + atomic_long_unchecked_t copy_gpa;
28193 + atomic_long_unchecked_t read_gpa;
28194 +
28195 + atomic_long_unchecked_t mesq_receive;
28196 + atomic_long_unchecked_t mesq_receive_none;
28197 + atomic_long_unchecked_t mesq_send;
28198 + atomic_long_unchecked_t mesq_send_failed;
28199 + atomic_long_unchecked_t mesq_noop;
28200 + atomic_long_unchecked_t mesq_send_unexpected_error;
28201 + atomic_long_unchecked_t mesq_send_lb_overflow;
28202 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28203 + atomic_long_unchecked_t mesq_send_amo_nacked;
28204 + atomic_long_unchecked_t mesq_send_put_nacked;
28205 + atomic_long_unchecked_t mesq_page_overflow;
28206 + atomic_long_unchecked_t mesq_qf_locked;
28207 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28208 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28209 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28210 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28211 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28212 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28213 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28214 + atomic_long_unchecked_t mesq_noop_put_nacked;
28215 + atomic_long_unchecked_t mesq_noop_page_overflow;
28216
28217 };
28218
28219 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28220 tghop_invalidate, mcsop_last};
28221
28222 struct mcs_op_statistic {
28223 - atomic_long_t count;
28224 - atomic_long_t total;
28225 + atomic_long_unchecked_t count;
28226 + atomic_long_unchecked_t total;
28227 unsigned long max;
28228 };
28229
28230 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28231
28232 #define STAT(id) do { \
28233 if (gru_options & OPT_STATS) \
28234 - atomic_long_inc(&gru_stats.id); \
28235 + atomic_long_inc_unchecked(&gru_stats.id); \
28236 } while (0)
28237
28238 #ifdef CONFIG_SGI_GRU_DEBUG
28239 diff -urNp linux-2.6.39.4/drivers/misc/sgi-xp/xp.h linux-2.6.39.4/drivers/misc/sgi-xp/xp.h
28240 --- linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-05-19 00:06:34.000000000 -0400
28241 +++ linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-08-05 20:34:06.000000000 -0400
28242 @@ -289,7 +289,7 @@ struct xpc_interface {
28243 xpc_notify_func, void *);
28244 void (*received) (short, int, void *);
28245 enum xp_retval (*partid_to_nasids) (short, void *);
28246 -};
28247 +} __no_const;
28248
28249 extern struct xpc_interface xpc_interface;
28250
28251 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c
28252 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-19 00:06:34.000000000 -0400
28253 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-05 19:44:37.000000000 -0400
28254 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28255 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28256 unsigned long timeo = jiffies + HZ;
28257
28258 + pax_track_stack();
28259 +
28260 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28261 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28262 goto sleep;
28263 @@ -1657,6 +1659,8 @@ static int __xipram do_write_buffer(stru
28264 unsigned long initial_adr;
28265 int initial_len = len;
28266
28267 + pax_track_stack();
28268 +
28269 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28270 adr += chip->start;
28271 initial_adr = adr;
28272 @@ -1875,6 +1879,8 @@ static int __xipram do_erase_oneblock(st
28273 int retries = 3;
28274 int ret;
28275
28276 + pax_track_stack();
28277 +
28278 adr += chip->start;
28279
28280 retry:
28281 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c
28282 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-19 00:06:34.000000000 -0400
28283 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-05 19:44:37.000000000 -0400
28284 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28285 unsigned long cmd_addr;
28286 struct cfi_private *cfi = map->fldrv_priv;
28287
28288 + pax_track_stack();
28289 +
28290 adr += chip->start;
28291
28292 /* Ensure cmd read/writes are aligned. */
28293 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
28294 DECLARE_WAITQUEUE(wait, current);
28295 int wbufsize, z;
28296
28297 + pax_track_stack();
28298 +
28299 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28300 if (adr & (map_bankwidth(map)-1))
28301 return -EINVAL;
28302 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
28303 DECLARE_WAITQUEUE(wait, current);
28304 int ret = 0;
28305
28306 + pax_track_stack();
28307 +
28308 adr += chip->start;
28309
28310 /* Let's determine this according to the interleave only once */
28311 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
28312 unsigned long timeo = jiffies + HZ;
28313 DECLARE_WAITQUEUE(wait, current);
28314
28315 + pax_track_stack();
28316 +
28317 adr += chip->start;
28318
28319 /* Let's determine this according to the interleave only once */
28320 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
28321 unsigned long timeo = jiffies + HZ;
28322 DECLARE_WAITQUEUE(wait, current);
28323
28324 + pax_track_stack();
28325 +
28326 adr += chip->start;
28327
28328 /* Let's determine this according to the interleave only once */
28329 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2000.c linux-2.6.39.4/drivers/mtd/devices/doc2000.c
28330 --- linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-05-19 00:06:34.000000000 -0400
28331 +++ linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-08-05 19:44:37.000000000 -0400
28332 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28333
28334 /* The ECC will not be calculated correctly if less than 512 is written */
28335 /* DBB-
28336 - if (len != 0x200 && eccbuf)
28337 + if (len != 0x200)
28338 printk(KERN_WARNING
28339 "ECC needs a full sector write (adr: %lx size %lx)\n",
28340 (long) to, (long) len);
28341 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2001.c linux-2.6.39.4/drivers/mtd/devices/doc2001.c
28342 --- linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-05-19 00:06:34.000000000 -0400
28343 +++ linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-08-05 19:44:37.000000000 -0400
28344 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28345 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28346
28347 /* Don't allow read past end of device */
28348 - if (from >= this->totlen)
28349 + if (from >= this->totlen || !len)
28350 return -EINVAL;
28351
28352 /* Don't allow a single read to cross a 512-byte block boundary */
28353 diff -urNp linux-2.6.39.4/drivers/mtd/ftl.c linux-2.6.39.4/drivers/mtd/ftl.c
28354 --- linux-2.6.39.4/drivers/mtd/ftl.c 2011-05-19 00:06:34.000000000 -0400
28355 +++ linux-2.6.39.4/drivers/mtd/ftl.c 2011-08-05 19:44:37.000000000 -0400
28356 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28357 loff_t offset;
28358 uint16_t srcunitswap = cpu_to_le16(srcunit);
28359
28360 + pax_track_stack();
28361 +
28362 eun = &part->EUNInfo[srcunit];
28363 xfer = &part->XferInfo[xferunit];
28364 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28365 diff -urNp linux-2.6.39.4/drivers/mtd/inftlcore.c linux-2.6.39.4/drivers/mtd/inftlcore.c
28366 --- linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-05-19 00:06:34.000000000 -0400
28367 +++ linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-08-05 19:44:37.000000000 -0400
28368 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28369 struct inftl_oob oob;
28370 size_t retlen;
28371
28372 + pax_track_stack();
28373 +
28374 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28375 "pending=%d)\n", inftl, thisVUC, pendingblock);
28376
28377 diff -urNp linux-2.6.39.4/drivers/mtd/inftlmount.c linux-2.6.39.4/drivers/mtd/inftlmount.c
28378 --- linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-05-19 00:06:34.000000000 -0400
28379 +++ linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-08-05 19:44:37.000000000 -0400
28380 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28381 struct INFTLPartition *ip;
28382 size_t retlen;
28383
28384 + pax_track_stack();
28385 +
28386 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28387
28388 /*
28389 diff -urNp linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c
28390 --- linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-05-19 00:06:34.000000000 -0400
28391 +++ linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-05 19:44:37.000000000 -0400
28392 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28393 {
28394 map_word pfow_val[4];
28395
28396 + pax_track_stack();
28397 +
28398 /* Check identification string */
28399 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28400 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28401 diff -urNp linux-2.6.39.4/drivers/mtd/mtdchar.c linux-2.6.39.4/drivers/mtd/mtdchar.c
28402 --- linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-05-19 00:06:34.000000000 -0400
28403 +++ linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-08-05 19:44:37.000000000 -0400
28404 @@ -560,6 +560,8 @@ static int mtd_ioctl(struct file *file,
28405 u_long size;
28406 struct mtd_info_user info;
28407
28408 + pax_track_stack();
28409 +
28410 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28411
28412 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28413 diff -urNp linux-2.6.39.4/drivers/mtd/nand/denali.c linux-2.6.39.4/drivers/mtd/nand/denali.c
28414 --- linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-05-19 00:06:34.000000000 -0400
28415 +++ linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-08-05 19:44:37.000000000 -0400
28416 @@ -25,6 +25,7 @@
28417 #include <linux/pci.h>
28418 #include <linux/mtd/mtd.h>
28419 #include <linux/module.h>
28420 +#include <linux/slab.h>
28421
28422 #include "denali.h"
28423
28424 diff -urNp linux-2.6.39.4/drivers/mtd/nftlcore.c linux-2.6.39.4/drivers/mtd/nftlcore.c
28425 --- linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-05-19 00:06:34.000000000 -0400
28426 +++ linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-08-05 19:44:37.000000000 -0400
28427 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28428 int inplace = 1;
28429 size_t retlen;
28430
28431 + pax_track_stack();
28432 +
28433 memset(BlockMap, 0xff, sizeof(BlockMap));
28434 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28435
28436 diff -urNp linux-2.6.39.4/drivers/mtd/nftlmount.c linux-2.6.39.4/drivers/mtd/nftlmount.c
28437 --- linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-05-19 00:06:34.000000000 -0400
28438 +++ linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-08-05 19:44:37.000000000 -0400
28439 @@ -24,6 +24,7 @@
28440 #include <asm/errno.h>
28441 #include <linux/delay.h>
28442 #include <linux/slab.h>
28443 +#include <linux/sched.h>
28444 #include <linux/mtd/mtd.h>
28445 #include <linux/mtd/nand.h>
28446 #include <linux/mtd/nftl.h>
28447 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28448 struct mtd_info *mtd = nftl->mbd.mtd;
28449 unsigned int i;
28450
28451 + pax_track_stack();
28452 +
28453 /* Assume logical EraseSize == physical erasesize for starting the scan.
28454 We'll sort it out later if we find a MediaHeader which says otherwise */
28455 /* Actually, we won't. The new DiskOnChip driver has already scanned
28456 diff -urNp linux-2.6.39.4/drivers/mtd/ubi/build.c linux-2.6.39.4/drivers/mtd/ubi/build.c
28457 --- linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-05-19 00:06:34.000000000 -0400
28458 +++ linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-08-05 19:44:37.000000000 -0400
28459 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28460 static int __init bytes_str_to_int(const char *str)
28461 {
28462 char *endp;
28463 - unsigned long result;
28464 + unsigned long result, scale = 1;
28465
28466 result = simple_strtoul(str, &endp, 0);
28467 if (str == endp || result >= INT_MAX) {
28468 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28469
28470 switch (*endp) {
28471 case 'G':
28472 - result *= 1024;
28473 + scale *= 1024;
28474 case 'M':
28475 - result *= 1024;
28476 + scale *= 1024;
28477 case 'K':
28478 - result *= 1024;
28479 + scale *= 1024;
28480 if (endp[1] == 'i' && endp[2] == 'B')
28481 endp += 2;
28482 case '\0':
28483 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28484 return -EINVAL;
28485 }
28486
28487 - return result;
28488 + if ((intoverflow_t)result*scale >= INT_MAX) {
28489 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28490 + str);
28491 + return -EINVAL;
28492 + }
28493 +
28494 + return result*scale;
28495 }
28496
28497 /**
28498 diff -urNp linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c
28499 --- linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-05-19 00:06:34.000000000 -0400
28500 +++ linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-05 20:34:06.000000000 -0400
28501 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28502 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28503 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28504
28505 -static struct bfa_ioc_hwif nw_hwif_ct;
28506 +static struct bfa_ioc_hwif nw_hwif_ct = {
28507 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28508 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28509 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28510 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28511 + .ioc_map_port = bfa_ioc_ct_map_port,
28512 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28513 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28514 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28515 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28516 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28517 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28518 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28519 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28520 +};
28521
28522 /**
28523 * Called from bfa_ioc_attach() to map asic specific calls.
28524 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28525 void
28526 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28527 {
28528 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28529 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28530 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28531 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28532 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28533 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28534 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28535 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28536 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28537 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28538 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28539 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28540 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28541 -
28542 ioc->ioc_hwif = &nw_hwif_ct;
28543 }
28544
28545 diff -urNp linux-2.6.39.4/drivers/net/bna/bnad.c linux-2.6.39.4/drivers/net/bna/bnad.c
28546 --- linux-2.6.39.4/drivers/net/bna/bnad.c 2011-05-19 00:06:34.000000000 -0400
28547 +++ linux-2.6.39.4/drivers/net/bna/bnad.c 2011-08-05 20:34:06.000000000 -0400
28548 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28549 struct bna_intr_info *intr_info =
28550 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28551 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28552 - struct bna_tx_event_cbfn tx_cbfn;
28553 + static struct bna_tx_event_cbfn tx_cbfn = {
28554 + /* Initialize the tx event handlers */
28555 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28556 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28557 + .tx_stall_cbfn = bnad_cb_tx_stall,
28558 + .tx_resume_cbfn = bnad_cb_tx_resume,
28559 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28560 + };
28561 struct bna_tx *tx;
28562 unsigned long flags;
28563
28564 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28565 tx_config->txq_depth = bnad->txq_depth;
28566 tx_config->tx_type = BNA_TX_T_REGULAR;
28567
28568 - /* Initialize the tx event handlers */
28569 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28570 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28571 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28572 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28573 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28574 -
28575 /* Get BNA's resource requirement for one tx object */
28576 spin_lock_irqsave(&bnad->bna_lock, flags);
28577 bna_tx_res_req(bnad->num_txq_per_tx,
28578 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28579 struct bna_intr_info *intr_info =
28580 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28581 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28582 - struct bna_rx_event_cbfn rx_cbfn;
28583 + static struct bna_rx_event_cbfn rx_cbfn = {
28584 + /* Initialize the Rx event handlers */
28585 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28586 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28587 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28588 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28589 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28590 + .rx_post_cbfn = bnad_cb_rx_post
28591 + };
28592 struct bna_rx *rx;
28593 unsigned long flags;
28594
28595 /* Initialize the Rx object configuration */
28596 bnad_init_rx_config(bnad, rx_config);
28597
28598 - /* Initialize the Rx event handlers */
28599 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28600 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28601 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28602 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28603 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28604 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28605 -
28606 /* Get BNA's resource requirement for one Rx object */
28607 spin_lock_irqsave(&bnad->bna_lock, flags);
28608 bna_rx_res_req(rx_config, res_info);
28609 diff -urNp linux-2.6.39.4/drivers/net/bnx2.c linux-2.6.39.4/drivers/net/bnx2.c
28610 --- linux-2.6.39.4/drivers/net/bnx2.c 2011-05-19 00:06:34.000000000 -0400
28611 +++ linux-2.6.39.4/drivers/net/bnx2.c 2011-08-05 19:44:37.000000000 -0400
28612 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28613 int rc = 0;
28614 u32 magic, csum;
28615
28616 + pax_track_stack();
28617 +
28618 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28619 goto test_nvram_done;
28620
28621 diff -urNp linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c
28622 --- linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-19 00:06:34.000000000 -0400
28623 +++ linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-05 19:44:37.000000000 -0400
28624 @@ -1788,6 +1788,8 @@ static int bnx2x_test_nvram(struct bnx2x
28625 int i, rc;
28626 u32 magic, crc;
28627
28628 + pax_track_stack();
28629 +
28630 if (BP_NOMCP(bp))
28631 return 0;
28632
28633 diff -urNp linux-2.6.39.4/drivers/net/cxgb3/l2t.h linux-2.6.39.4/drivers/net/cxgb3/l2t.h
28634 --- linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-05-19 00:06:34.000000000 -0400
28635 +++ linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-08-05 20:34:06.000000000 -0400
28636 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28637 */
28638 struct l2t_skb_cb {
28639 arp_failure_handler_func arp_failure_handler;
28640 -};
28641 +} __no_const;
28642
28643 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28644
28645 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c
28646 --- linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-05-19 00:06:34.000000000 -0400
28647 +++ linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-05 19:44:37.000000000 -0400
28648 @@ -3428,6 +3428,8 @@ static int __devinit enable_msix(struct
28649 unsigned int nchan = adap->params.nports;
28650 struct msix_entry entries[MAX_INGQ + 1];
28651
28652 + pax_track_stack();
28653 +
28654 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28655 entries[i].entry = i;
28656
28657 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c
28658 --- linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-05-19 00:06:34.000000000 -0400
28659 +++ linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-08-05 19:44:37.000000000 -0400
28660 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28661 u8 vpd[VPD_LEN], csum;
28662 unsigned int vpdr_len, kw_offset, id_len;
28663
28664 + pax_track_stack();
28665 +
28666 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28667 if (ret < 0)
28668 return ret;
28669 diff -urNp linux-2.6.39.4/drivers/net/e1000e/82571.c linux-2.6.39.4/drivers/net/e1000e/82571.c
28670 --- linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-05-19 00:06:34.000000000 -0400
28671 +++ linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-08-05 20:34:06.000000000 -0400
28672 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28673 {
28674 struct e1000_hw *hw = &adapter->hw;
28675 struct e1000_mac_info *mac = &hw->mac;
28676 - struct e1000_mac_operations *func = &mac->ops;
28677 + e1000_mac_operations_no_const *func = &mac->ops;
28678 u32 swsm = 0;
28679 u32 swsm2 = 0;
28680 bool force_clear_smbi = false;
28681 diff -urNp linux-2.6.39.4/drivers/net/e1000e/es2lan.c linux-2.6.39.4/drivers/net/e1000e/es2lan.c
28682 --- linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-05-19 00:06:34.000000000 -0400
28683 +++ linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-08-05 20:34:06.000000000 -0400
28684 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28685 {
28686 struct e1000_hw *hw = &adapter->hw;
28687 struct e1000_mac_info *mac = &hw->mac;
28688 - struct e1000_mac_operations *func = &mac->ops;
28689 + e1000_mac_operations_no_const *func = &mac->ops;
28690
28691 /* Set media type */
28692 switch (adapter->pdev->device) {
28693 diff -urNp linux-2.6.39.4/drivers/net/e1000e/hw.h linux-2.6.39.4/drivers/net/e1000e/hw.h
28694 --- linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-05-19 00:06:34.000000000 -0400
28695 +++ linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-08-05 20:34:06.000000000 -0400
28696 @@ -775,6 +775,7 @@ struct e1000_mac_operations {
28697 void (*write_vfta)(struct e1000_hw *, u32, u32);
28698 s32 (*read_mac_addr)(struct e1000_hw *);
28699 };
28700 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28701
28702 /* Function pointers for the PHY. */
28703 struct e1000_phy_operations {
28704 @@ -798,6 +799,7 @@ struct e1000_phy_operations {
28705 void (*power_up)(struct e1000_hw *);
28706 void (*power_down)(struct e1000_hw *);
28707 };
28708 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28709
28710 /* Function pointers for the NVM. */
28711 struct e1000_nvm_operations {
28712 @@ -809,9 +811,10 @@ struct e1000_nvm_operations {
28713 s32 (*validate)(struct e1000_hw *);
28714 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28715 };
28716 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28717
28718 struct e1000_mac_info {
28719 - struct e1000_mac_operations ops;
28720 + e1000_mac_operations_no_const ops;
28721 u8 addr[ETH_ALEN];
28722 u8 perm_addr[ETH_ALEN];
28723
28724 @@ -852,7 +855,7 @@ struct e1000_mac_info {
28725 };
28726
28727 struct e1000_phy_info {
28728 - struct e1000_phy_operations ops;
28729 + e1000_phy_operations_no_const ops;
28730
28731 enum e1000_phy_type type;
28732
28733 @@ -886,7 +889,7 @@ struct e1000_phy_info {
28734 };
28735
28736 struct e1000_nvm_info {
28737 - struct e1000_nvm_operations ops;
28738 + e1000_nvm_operations_no_const ops;
28739
28740 enum e1000_nvm_type type;
28741 enum e1000_nvm_override override;
28742 diff -urNp linux-2.6.39.4/drivers/net/hamradio/6pack.c linux-2.6.39.4/drivers/net/hamradio/6pack.c
28743 --- linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-07-09 09:18:51.000000000 -0400
28744 +++ linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-08-05 19:44:37.000000000 -0400
28745 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28746 unsigned char buf[512];
28747 int count1;
28748
28749 + pax_track_stack();
28750 +
28751 if (!count)
28752 return;
28753
28754 diff -urNp linux-2.6.39.4/drivers/net/igb/e1000_hw.h linux-2.6.39.4/drivers/net/igb/e1000_hw.h
28755 --- linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-05-19 00:06:34.000000000 -0400
28756 +++ linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-08-05 20:34:06.000000000 -0400
28757 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28758 s32 (*read_mac_addr)(struct e1000_hw *);
28759 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28760 };
28761 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28762
28763 struct e1000_phy_operations {
28764 s32 (*acquire)(struct e1000_hw *);
28765 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28766 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28767 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28768 };
28769 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28770
28771 struct e1000_nvm_operations {
28772 s32 (*acquire)(struct e1000_hw *);
28773 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28774 s32 (*update)(struct e1000_hw *);
28775 s32 (*validate)(struct e1000_hw *);
28776 };
28777 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28778
28779 struct e1000_info {
28780 s32 (*get_invariants)(struct e1000_hw *);
28781 @@ -350,7 +353,7 @@ struct e1000_info {
28782 extern const struct e1000_info e1000_82575_info;
28783
28784 struct e1000_mac_info {
28785 - struct e1000_mac_operations ops;
28786 + e1000_mac_operations_no_const ops;
28787
28788 u8 addr[6];
28789 u8 perm_addr[6];
28790 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28791 };
28792
28793 struct e1000_phy_info {
28794 - struct e1000_phy_operations ops;
28795 + e1000_phy_operations_no_const ops;
28796
28797 enum e1000_phy_type type;
28798
28799 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28800 };
28801
28802 struct e1000_nvm_info {
28803 - struct e1000_nvm_operations ops;
28804 + e1000_nvm_operations_no_const ops;
28805 enum e1000_nvm_type type;
28806 enum e1000_nvm_override override;
28807
28808 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28809 s32 (*check_for_ack)(struct e1000_hw *, u16);
28810 s32 (*check_for_rst)(struct e1000_hw *, u16);
28811 };
28812 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28813
28814 struct e1000_mbx_stats {
28815 u32 msgs_tx;
28816 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28817 };
28818
28819 struct e1000_mbx_info {
28820 - struct e1000_mbx_operations ops;
28821 + e1000_mbx_operations_no_const ops;
28822 struct e1000_mbx_stats stats;
28823 u32 timeout;
28824 u32 usec_delay;
28825 diff -urNp linux-2.6.39.4/drivers/net/igbvf/vf.h linux-2.6.39.4/drivers/net/igbvf/vf.h
28826 --- linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-05-19 00:06:34.000000000 -0400
28827 +++ linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-08-05 20:34:06.000000000 -0400
28828 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28829 s32 (*read_mac_addr)(struct e1000_hw *);
28830 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28831 };
28832 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28833
28834 struct e1000_mac_info {
28835 - struct e1000_mac_operations ops;
28836 + e1000_mac_operations_no_const ops;
28837 u8 addr[6];
28838 u8 perm_addr[6];
28839
28840 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28841 s32 (*check_for_ack)(struct e1000_hw *);
28842 s32 (*check_for_rst)(struct e1000_hw *);
28843 };
28844 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28845
28846 struct e1000_mbx_stats {
28847 u32 msgs_tx;
28848 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28849 };
28850
28851 struct e1000_mbx_info {
28852 - struct e1000_mbx_operations ops;
28853 + e1000_mbx_operations_no_const ops;
28854 struct e1000_mbx_stats stats;
28855 u32 timeout;
28856 u32 usec_delay;
28857 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c
28858 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-05-19 00:06:34.000000000 -0400
28859 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-08-05 19:44:37.000000000 -0400
28860 @@ -1069,6 +1069,8 @@ ixgb_set_multi(struct net_device *netdev
28861 u32 rctl;
28862 int i;
28863
28864 + pax_track_stack();
28865 +
28866 /* Check for Promiscuous and All Multicast modes */
28867
28868 rctl = IXGB_READ_REG(hw, RCTL);
28869 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c
28870 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-05-19 00:06:34.000000000 -0400
28871 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-08-05 19:44:37.000000000 -0400
28872 @@ -261,6 +261,9 @@ void __devinit
28873 ixgb_check_options(struct ixgb_adapter *adapter)
28874 {
28875 int bd = adapter->bd_number;
28876 +
28877 + pax_track_stack();
28878 +
28879 if (bd >= IXGB_MAX_NIC) {
28880 pr_notice("Warning: no configuration for board #%i\n", bd);
28881 pr_notice("Using defaults for all values\n");
28882 diff -urNp linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h
28883 --- linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-05-19 00:06:34.000000000 -0400
28884 +++ linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-05 20:34:06.000000000 -0400
28885 @@ -2496,6 +2496,7 @@ struct ixgbe_eeprom_operations {
28886 s32 (*update_checksum)(struct ixgbe_hw *);
28887 u16 (*calc_checksum)(struct ixgbe_hw *);
28888 };
28889 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28890
28891 struct ixgbe_mac_operations {
28892 s32 (*init_hw)(struct ixgbe_hw *);
28893 @@ -2551,6 +2552,7 @@ struct ixgbe_mac_operations {
28894 /* Flow Control */
28895 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28896 };
28897 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28898
28899 struct ixgbe_phy_operations {
28900 s32 (*identify)(struct ixgbe_hw *);
28901 @@ -2570,9 +2572,10 @@ struct ixgbe_phy_operations {
28902 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28903 s32 (*check_overtemp)(struct ixgbe_hw *);
28904 };
28905 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28906
28907 struct ixgbe_eeprom_info {
28908 - struct ixgbe_eeprom_operations ops;
28909 + ixgbe_eeprom_operations_no_const ops;
28910 enum ixgbe_eeprom_type type;
28911 u32 semaphore_delay;
28912 u16 word_size;
28913 @@ -2581,7 +2584,7 @@ struct ixgbe_eeprom_info {
28914
28915 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28916 struct ixgbe_mac_info {
28917 - struct ixgbe_mac_operations ops;
28918 + ixgbe_mac_operations_no_const ops;
28919 enum ixgbe_mac_type type;
28920 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28921 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28922 @@ -2608,7 +2611,7 @@ struct ixgbe_mac_info {
28923 };
28924
28925 struct ixgbe_phy_info {
28926 - struct ixgbe_phy_operations ops;
28927 + ixgbe_phy_operations_no_const ops;
28928 struct mdio_if_info mdio;
28929 enum ixgbe_phy_type type;
28930 u32 id;
28931 @@ -2636,6 +2639,7 @@ struct ixgbe_mbx_operations {
28932 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28933 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28934 };
28935 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28936
28937 struct ixgbe_mbx_stats {
28938 u32 msgs_tx;
28939 @@ -2647,7 +2651,7 @@ struct ixgbe_mbx_stats {
28940 };
28941
28942 struct ixgbe_mbx_info {
28943 - struct ixgbe_mbx_operations ops;
28944 + ixgbe_mbx_operations_no_const ops;
28945 struct ixgbe_mbx_stats stats;
28946 u32 timeout;
28947 u32 usec_delay;
28948 diff -urNp linux-2.6.39.4/drivers/net/ixgbevf/vf.h linux-2.6.39.4/drivers/net/ixgbevf/vf.h
28949 --- linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-05-19 00:06:34.000000000 -0400
28950 +++ linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-08-05 20:34:06.000000000 -0400
28951 @@ -69,6 +69,7 @@ struct ixgbe_mac_operations {
28952 s32 (*clear_vfta)(struct ixgbe_hw *);
28953 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28954 };
28955 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28956
28957 enum ixgbe_mac_type {
28958 ixgbe_mac_unknown = 0,
28959 @@ -78,7 +79,7 @@ enum ixgbe_mac_type {
28960 };
28961
28962 struct ixgbe_mac_info {
28963 - struct ixgbe_mac_operations ops;
28964 + ixgbe_mac_operations_no_const ops;
28965 u8 addr[6];
28966 u8 perm_addr[6];
28967
28968 @@ -102,6 +103,7 @@ struct ixgbe_mbx_operations {
28969 s32 (*check_for_ack)(struct ixgbe_hw *);
28970 s32 (*check_for_rst)(struct ixgbe_hw *);
28971 };
28972 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28973
28974 struct ixgbe_mbx_stats {
28975 u32 msgs_tx;
28976 @@ -113,7 +115,7 @@ struct ixgbe_mbx_stats {
28977 };
28978
28979 struct ixgbe_mbx_info {
28980 - struct ixgbe_mbx_operations ops;
28981 + ixgbe_mbx_operations_no_const ops;
28982 struct ixgbe_mbx_stats stats;
28983 u32 timeout;
28984 u32 udelay;
28985 diff -urNp linux-2.6.39.4/drivers/net/ksz884x.c linux-2.6.39.4/drivers/net/ksz884x.c
28986 --- linux-2.6.39.4/drivers/net/ksz884x.c 2011-05-19 00:06:34.000000000 -0400
28987 +++ linux-2.6.39.4/drivers/net/ksz884x.c 2011-08-05 20:34:06.000000000 -0400
28988 @@ -6536,6 +6536,8 @@ static void netdev_get_ethtool_stats(str
28989 int rc;
28990 u64 counter[TOTAL_PORT_COUNTER_NUM];
28991
28992 + pax_track_stack();
28993 +
28994 mutex_lock(&hw_priv->lock);
28995 n = SWITCH_PORT_NUM;
28996 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28997 diff -urNp linux-2.6.39.4/drivers/net/mlx4/main.c linux-2.6.39.4/drivers/net/mlx4/main.c
28998 --- linux-2.6.39.4/drivers/net/mlx4/main.c 2011-05-19 00:06:34.000000000 -0400
28999 +++ linux-2.6.39.4/drivers/net/mlx4/main.c 2011-08-05 19:44:37.000000000 -0400
29000 @@ -40,6 +40,7 @@
29001 #include <linux/dma-mapping.h>
29002 #include <linux/slab.h>
29003 #include <linux/io-mapping.h>
29004 +#include <linux/sched.h>
29005
29006 #include <linux/mlx4/device.h>
29007 #include <linux/mlx4/doorbell.h>
29008 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
29009 u64 icm_size;
29010 int err;
29011
29012 + pax_track_stack();
29013 +
29014 err = mlx4_QUERY_FW(dev);
29015 if (err) {
29016 if (err == -EACCES)
29017 diff -urNp linux-2.6.39.4/drivers/net/niu.c linux-2.6.39.4/drivers/net/niu.c
29018 --- linux-2.6.39.4/drivers/net/niu.c 2011-05-19 00:06:34.000000000 -0400
29019 +++ linux-2.6.39.4/drivers/net/niu.c 2011-08-05 19:44:37.000000000 -0400
29020 @@ -9067,6 +9067,8 @@ static void __devinit niu_try_msix(struc
29021 int i, num_irqs, err;
29022 u8 first_ldg;
29023
29024 + pax_track_stack();
29025 +
29026 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29027 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29028 ldg_num_map[i] = first_ldg + i;
29029 diff -urNp linux-2.6.39.4/drivers/net/pcnet32.c linux-2.6.39.4/drivers/net/pcnet32.c
29030 --- linux-2.6.39.4/drivers/net/pcnet32.c 2011-05-19 00:06:34.000000000 -0400
29031 +++ linux-2.6.39.4/drivers/net/pcnet32.c 2011-08-05 20:34:06.000000000 -0400
29032 @@ -82,7 +82,7 @@ static int cards_found;
29033 /*
29034 * VLB I/O addresses
29035 */
29036 -static unsigned int pcnet32_portlist[] __initdata =
29037 +static unsigned int pcnet32_portlist[] __devinitdata =
29038 { 0x300, 0x320, 0x340, 0x360, 0 };
29039
29040 static int pcnet32_debug;
29041 @@ -270,7 +270,7 @@ struct pcnet32_private {
29042 struct sk_buff **rx_skbuff;
29043 dma_addr_t *tx_dma_addr;
29044 dma_addr_t *rx_dma_addr;
29045 - struct pcnet32_access a;
29046 + struct pcnet32_access *a;
29047 spinlock_t lock; /* Guard lock */
29048 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29049 unsigned int rx_ring_size; /* current rx ring size */
29050 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29051 u16 val;
29052
29053 netif_wake_queue(dev);
29054 - val = lp->a.read_csr(ioaddr, CSR3);
29055 + val = lp->a->read_csr(ioaddr, CSR3);
29056 val &= 0x00ff;
29057 - lp->a.write_csr(ioaddr, CSR3, val);
29058 + lp->a->write_csr(ioaddr, CSR3, val);
29059 napi_enable(&lp->napi);
29060 }
29061
29062 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29063 r = mii_link_ok(&lp->mii_if);
29064 } else if (lp->chip_version >= PCNET32_79C970A) {
29065 ulong ioaddr = dev->base_addr; /* card base I/O address */
29066 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29067 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29068 } else { /* can not detect link on really old chips */
29069 r = 1;
29070 }
29071 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29072 pcnet32_netif_stop(dev);
29073
29074 spin_lock_irqsave(&lp->lock, flags);
29075 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29076 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29077
29078 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29079
29080 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29081 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29082 {
29083 struct pcnet32_private *lp = netdev_priv(dev);
29084 - struct pcnet32_access *a = &lp->a; /* access to registers */
29085 + struct pcnet32_access *a = lp->a; /* access to registers */
29086 ulong ioaddr = dev->base_addr; /* card base I/O address */
29087 struct sk_buff *skb; /* sk buff */
29088 int x, i; /* counters */
29089 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29090 pcnet32_netif_stop(dev);
29091
29092 spin_lock_irqsave(&lp->lock, flags);
29093 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29094 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29095
29096 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29097
29098 /* Reset the PCNET32 */
29099 - lp->a.reset(ioaddr);
29100 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29101 + lp->a->reset(ioaddr);
29102 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29103
29104 /* switch pcnet32 to 32bit mode */
29105 - lp->a.write_bcr(ioaddr, 20, 2);
29106 + lp->a->write_bcr(ioaddr, 20, 2);
29107
29108 /* purge & init rings but don't actually restart */
29109 pcnet32_restart(dev, 0x0000);
29110
29111 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29112 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29113
29114 /* Initialize Transmit buffers. */
29115 size = data_len + 15;
29116 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29117
29118 /* set int loopback in CSR15 */
29119 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29120 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29121 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29122
29123 teststatus = cpu_to_le16(0x8000);
29124 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29125 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29126
29127 /* Check status of descriptors */
29128 for (x = 0; x < numbuffs; x++) {
29129 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29130 }
29131 }
29132
29133 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29134 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29135 wmb();
29136 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29137 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29138 @@ -1015,7 +1015,7 @@ clean_up:
29139 pcnet32_restart(dev, CSR0_NORMAL);
29140 } else {
29141 pcnet32_purge_rx_ring(dev);
29142 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29143 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29144 }
29145 spin_unlock_irqrestore(&lp->lock, flags);
29146
29147 @@ -1025,7 +1025,7 @@ clean_up:
29148 static void pcnet32_led_blink_callback(struct net_device *dev)
29149 {
29150 struct pcnet32_private *lp = netdev_priv(dev);
29151 - struct pcnet32_access *a = &lp->a;
29152 + struct pcnet32_access *a = lp->a;
29153 ulong ioaddr = dev->base_addr;
29154 unsigned long flags;
29155 int i;
29156 @@ -1041,7 +1041,7 @@ static void pcnet32_led_blink_callback(s
29157 static int pcnet32_phys_id(struct net_device *dev, u32 data)
29158 {
29159 struct pcnet32_private *lp = netdev_priv(dev);
29160 - struct pcnet32_access *a = &lp->a;
29161 + struct pcnet32_access *a = lp->a;
29162 ulong ioaddr = dev->base_addr;
29163 unsigned long flags;
29164 int i, regs[4];
29165 @@ -1085,7 +1085,7 @@ static int pcnet32_suspend(struct net_de
29166 {
29167 int csr5;
29168 struct pcnet32_private *lp = netdev_priv(dev);
29169 - struct pcnet32_access *a = &lp->a;
29170 + struct pcnet32_access *a = lp->a;
29171 ulong ioaddr = dev->base_addr;
29172 int ticks;
29173
29174 @@ -1342,8 +1342,8 @@ static int pcnet32_poll(struct napi_stru
29175 spin_lock_irqsave(&lp->lock, flags);
29176 if (pcnet32_tx(dev)) {
29177 /* reset the chip to clear the error condition, then restart */
29178 - lp->a.reset(ioaddr);
29179 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29180 + lp->a->reset(ioaddr);
29181 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29182 pcnet32_restart(dev, CSR0_START);
29183 netif_wake_queue(dev);
29184 }
29185 @@ -1355,12 +1355,12 @@ static int pcnet32_poll(struct napi_stru
29186 __napi_complete(napi);
29187
29188 /* clear interrupt masks */
29189 - val = lp->a.read_csr(ioaddr, CSR3);
29190 + val = lp->a->read_csr(ioaddr, CSR3);
29191 val &= 0x00ff;
29192 - lp->a.write_csr(ioaddr, CSR3, val);
29193 + lp->a->write_csr(ioaddr, CSR3, val);
29194
29195 /* Set interrupt enable. */
29196 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29197 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29198
29199 spin_unlock_irqrestore(&lp->lock, flags);
29200 }
29201 @@ -1383,7 +1383,7 @@ static void pcnet32_get_regs(struct net_
29202 int i, csr0;
29203 u16 *buff = ptr;
29204 struct pcnet32_private *lp = netdev_priv(dev);
29205 - struct pcnet32_access *a = &lp->a;
29206 + struct pcnet32_access *a = lp->a;
29207 ulong ioaddr = dev->base_addr;
29208 unsigned long flags;
29209
29210 @@ -1419,9 +1419,9 @@ static void pcnet32_get_regs(struct net_
29211 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29212 if (lp->phymask & (1 << j)) {
29213 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29214 - lp->a.write_bcr(ioaddr, 33,
29215 + lp->a->write_bcr(ioaddr, 33,
29216 (j << 5) | i);
29217 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29218 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29219 }
29220 }
29221 }
29222 @@ -1803,7 +1803,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29223 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29224 lp->options |= PCNET32_PORT_FD;
29225
29226 - lp->a = *a;
29227 + lp->a = a;
29228
29229 /* prior to register_netdev, dev->name is not yet correct */
29230 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29231 @@ -1862,7 +1862,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29232 if (lp->mii) {
29233 /* lp->phycount and lp->phymask are set to 0 by memset above */
29234
29235 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29236 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29237 /* scan for PHYs */
29238 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29239 unsigned short id1, id2;
29240 @@ -1882,7 +1882,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29241 pr_info("Found PHY %04x:%04x at address %d\n",
29242 id1, id2, i);
29243 }
29244 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29245 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29246 if (lp->phycount > 1)
29247 lp->options |= PCNET32_PORT_MII;
29248 }
29249 @@ -2038,10 +2038,10 @@ static int pcnet32_open(struct net_devic
29250 }
29251
29252 /* Reset the PCNET32 */
29253 - lp->a.reset(ioaddr);
29254 + lp->a->reset(ioaddr);
29255
29256 /* switch pcnet32 to 32bit mode */
29257 - lp->a.write_bcr(ioaddr, 20, 2);
29258 + lp->a->write_bcr(ioaddr, 20, 2);
29259
29260 netif_printk(lp, ifup, KERN_DEBUG, dev,
29261 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29262 @@ -2050,14 +2050,14 @@ static int pcnet32_open(struct net_devic
29263 (u32) (lp->init_dma_addr));
29264
29265 /* set/reset autoselect bit */
29266 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29267 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29268 if (lp->options & PCNET32_PORT_ASEL)
29269 val |= 2;
29270 - lp->a.write_bcr(ioaddr, 2, val);
29271 + lp->a->write_bcr(ioaddr, 2, val);
29272
29273 /* handle full duplex setting */
29274 if (lp->mii_if.full_duplex) {
29275 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29276 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29277 if (lp->options & PCNET32_PORT_FD) {
29278 val |= 1;
29279 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29280 @@ -2067,14 +2067,14 @@ static int pcnet32_open(struct net_devic
29281 if (lp->chip_version == 0x2627)
29282 val |= 3;
29283 }
29284 - lp->a.write_bcr(ioaddr, 9, val);
29285 + lp->a->write_bcr(ioaddr, 9, val);
29286 }
29287
29288 /* set/reset GPSI bit in test register */
29289 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29290 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29291 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29292 val |= 0x10;
29293 - lp->a.write_csr(ioaddr, 124, val);
29294 + lp->a->write_csr(ioaddr, 124, val);
29295
29296 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29297 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29298 @@ -2093,24 +2093,24 @@ static int pcnet32_open(struct net_devic
29299 * duplex, and/or enable auto negotiation, and clear DANAS
29300 */
29301 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29302 - lp->a.write_bcr(ioaddr, 32,
29303 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29304 + lp->a->write_bcr(ioaddr, 32,
29305 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29306 /* disable Auto Negotiation, set 10Mpbs, HD */
29307 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29308 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29309 if (lp->options & PCNET32_PORT_FD)
29310 val |= 0x10;
29311 if (lp->options & PCNET32_PORT_100)
29312 val |= 0x08;
29313 - lp->a.write_bcr(ioaddr, 32, val);
29314 + lp->a->write_bcr(ioaddr, 32, val);
29315 } else {
29316 if (lp->options & PCNET32_PORT_ASEL) {
29317 - lp->a.write_bcr(ioaddr, 32,
29318 - lp->a.read_bcr(ioaddr,
29319 + lp->a->write_bcr(ioaddr, 32,
29320 + lp->a->read_bcr(ioaddr,
29321 32) | 0x0080);
29322 /* enable auto negotiate, setup, disable fd */
29323 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29324 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29325 val |= 0x20;
29326 - lp->a.write_bcr(ioaddr, 32, val);
29327 + lp->a->write_bcr(ioaddr, 32, val);
29328 }
29329 }
29330 } else {
29331 @@ -2123,10 +2123,10 @@ static int pcnet32_open(struct net_devic
29332 * There is really no good other way to handle multiple PHYs
29333 * other than turning off all automatics
29334 */
29335 - val = lp->a.read_bcr(ioaddr, 2);
29336 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29337 - val = lp->a.read_bcr(ioaddr, 32);
29338 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29339 + val = lp->a->read_bcr(ioaddr, 2);
29340 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29341 + val = lp->a->read_bcr(ioaddr, 32);
29342 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29343
29344 if (!(lp->options & PCNET32_PORT_ASEL)) {
29345 /* setup ecmd */
29346 @@ -2136,7 +2136,7 @@ static int pcnet32_open(struct net_devic
29347 ecmd.speed =
29348 lp->
29349 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
29350 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29351 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29352
29353 if (lp->options & PCNET32_PORT_FD) {
29354 ecmd.duplex = DUPLEX_FULL;
29355 @@ -2145,7 +2145,7 @@ static int pcnet32_open(struct net_devic
29356 ecmd.duplex = DUPLEX_HALF;
29357 bcr9 |= ~(1 << 0);
29358 }
29359 - lp->a.write_bcr(ioaddr, 9, bcr9);
29360 + lp->a->write_bcr(ioaddr, 9, bcr9);
29361 }
29362
29363 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29364 @@ -2176,9 +2176,9 @@ static int pcnet32_open(struct net_devic
29365
29366 #ifdef DO_DXSUFLO
29367 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29368 - val = lp->a.read_csr(ioaddr, CSR3);
29369 + val = lp->a->read_csr(ioaddr, CSR3);
29370 val |= 0x40;
29371 - lp->a.write_csr(ioaddr, CSR3, val);
29372 + lp->a->write_csr(ioaddr, CSR3, val);
29373 }
29374 #endif
29375
29376 @@ -2194,11 +2194,11 @@ static int pcnet32_open(struct net_devic
29377 napi_enable(&lp->napi);
29378
29379 /* Re-initialize the PCNET32, and start it when done. */
29380 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29381 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29382 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29383 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29384
29385 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29386 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29387 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29388 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29389
29390 netif_start_queue(dev);
29391
29392 @@ -2210,19 +2210,19 @@ static int pcnet32_open(struct net_devic
29393
29394 i = 0;
29395 while (i++ < 100)
29396 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29397 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29398 break;
29399 /*
29400 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29401 * reports that doing so triggers a bug in the '974.
29402 */
29403 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29404 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29405
29406 netif_printk(lp, ifup, KERN_DEBUG, dev,
29407 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29408 i,
29409 (u32) (lp->init_dma_addr),
29410 - lp->a.read_csr(ioaddr, CSR0));
29411 + lp->a->read_csr(ioaddr, CSR0));
29412
29413 spin_unlock_irqrestore(&lp->lock, flags);
29414
29415 @@ -2236,7 +2236,7 @@ err_free_ring:
29416 * Switch back to 16bit mode to avoid problems with dumb
29417 * DOS packet driver after a warm reboot
29418 */
29419 - lp->a.write_bcr(ioaddr, 20, 4);
29420 + lp->a->write_bcr(ioaddr, 20, 4);
29421
29422 err_free_irq:
29423 spin_unlock_irqrestore(&lp->lock, flags);
29424 @@ -2341,7 +2341,7 @@ static void pcnet32_restart(struct net_d
29425
29426 /* wait for stop */
29427 for (i = 0; i < 100; i++)
29428 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29429 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29430 break;
29431
29432 if (i >= 100)
29433 @@ -2353,13 +2353,13 @@ static void pcnet32_restart(struct net_d
29434 return;
29435
29436 /* ReInit Ring */
29437 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29438 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29439 i = 0;
29440 while (i++ < 1000)
29441 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29442 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29443 break;
29444
29445 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29446 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29447 }
29448
29449 static void pcnet32_tx_timeout(struct net_device *dev)
29450 @@ -2371,8 +2371,8 @@ static void pcnet32_tx_timeout(struct ne
29451 /* Transmitter timeout, serious problems. */
29452 if (pcnet32_debug & NETIF_MSG_DRV)
29453 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29454 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29455 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29456 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29457 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29458 dev->stats.tx_errors++;
29459 if (netif_msg_tx_err(lp)) {
29460 int i;
29461 @@ -2415,7 +2415,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29462
29463 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29464 "%s() called, csr0 %4.4x\n",
29465 - __func__, lp->a.read_csr(ioaddr, CSR0));
29466 + __func__, lp->a->read_csr(ioaddr, CSR0));
29467
29468 /* Default status -- will not enable Successful-TxDone
29469 * interrupt when that option is available to us.
29470 @@ -2445,7 +2445,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29471 dev->stats.tx_bytes += skb->len;
29472
29473 /* Trigger an immediate send poll. */
29474 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29475 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29476
29477 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29478 lp->tx_full = 1;
29479 @@ -2470,16 +2470,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29480
29481 spin_lock(&lp->lock);
29482
29483 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29484 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29485 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29486 if (csr0 == 0xffff)
29487 break; /* PCMCIA remove happened */
29488 /* Acknowledge all of the current interrupt sources ASAP. */
29489 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29490 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29491
29492 netif_printk(lp, intr, KERN_DEBUG, dev,
29493 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29494 - csr0, lp->a.read_csr(ioaddr, CSR0));
29495 + csr0, lp->a->read_csr(ioaddr, CSR0));
29496
29497 /* Log misc errors. */
29498 if (csr0 & 0x4000)
29499 @@ -2506,19 +2506,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29500 if (napi_schedule_prep(&lp->napi)) {
29501 u16 val;
29502 /* set interrupt masks */
29503 - val = lp->a.read_csr(ioaddr, CSR3);
29504 + val = lp->a->read_csr(ioaddr, CSR3);
29505 val |= 0x5f00;
29506 - lp->a.write_csr(ioaddr, CSR3, val);
29507 + lp->a->write_csr(ioaddr, CSR3, val);
29508
29509 __napi_schedule(&lp->napi);
29510 break;
29511 }
29512 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29513 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29514 }
29515
29516 netif_printk(lp, intr, KERN_DEBUG, dev,
29517 "exiting interrupt, csr0=%#4.4x\n",
29518 - lp->a.read_csr(ioaddr, CSR0));
29519 + lp->a->read_csr(ioaddr, CSR0));
29520
29521 spin_unlock(&lp->lock);
29522
29523 @@ -2538,20 +2538,20 @@ static int pcnet32_close(struct net_devi
29524
29525 spin_lock_irqsave(&lp->lock, flags);
29526
29527 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29528 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29529
29530 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29531 "Shutting down ethercard, status was %2.2x\n",
29532 - lp->a.read_csr(ioaddr, CSR0));
29533 + lp->a->read_csr(ioaddr, CSR0));
29534
29535 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29536 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29537 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29538
29539 /*
29540 * Switch back to 16bit mode to avoid problems with dumb
29541 * DOS packet driver after a warm reboot
29542 */
29543 - lp->a.write_bcr(ioaddr, 20, 4);
29544 + lp->a->write_bcr(ioaddr, 20, 4);
29545
29546 spin_unlock_irqrestore(&lp->lock, flags);
29547
29548 @@ -2574,7 +2574,7 @@ static struct net_device_stats *pcnet32_
29549 unsigned long flags;
29550
29551 spin_lock_irqsave(&lp->lock, flags);
29552 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29553 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29554 spin_unlock_irqrestore(&lp->lock, flags);
29555
29556 return &dev->stats;
29557 @@ -2596,10 +2596,10 @@ static void pcnet32_load_multicast(struc
29558 if (dev->flags & IFF_ALLMULTI) {
29559 ib->filter[0] = cpu_to_le32(~0U);
29560 ib->filter[1] = cpu_to_le32(~0U);
29561 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29562 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29563 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29564 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29565 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29566 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29567 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29568 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29569 return;
29570 }
29571 /* clear the multicast filter */
29572 @@ -2619,7 +2619,7 @@ static void pcnet32_load_multicast(struc
29573 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29574 }
29575 for (i = 0; i < 4; i++)
29576 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29577 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29578 le16_to_cpu(mcast_table[i]));
29579 }
29580
29581 @@ -2634,28 +2634,28 @@ static void pcnet32_set_multicast_list(s
29582
29583 spin_lock_irqsave(&lp->lock, flags);
29584 suspended = pcnet32_suspend(dev, &flags, 0);
29585 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29586 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29587 if (dev->flags & IFF_PROMISC) {
29588 /* Log any net taps. */
29589 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29590 lp->init_block->mode =
29591 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29592 7);
29593 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29594 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29595 } else {
29596 lp->init_block->mode =
29597 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29598 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29599 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29600 pcnet32_load_multicast(dev);
29601 }
29602
29603 if (suspended) {
29604 int csr5;
29605 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29606 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29607 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29608 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29609 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29610 } else {
29611 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29612 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29613 pcnet32_restart(dev, CSR0_NORMAL);
29614 netif_wake_queue(dev);
29615 }
29616 @@ -2673,8 +2673,8 @@ static int mdio_read(struct net_device *
29617 if (!lp->mii)
29618 return 0;
29619
29620 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29621 - val_out = lp->a.read_bcr(ioaddr, 34);
29622 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29623 + val_out = lp->a->read_bcr(ioaddr, 34);
29624
29625 return val_out;
29626 }
29627 @@ -2688,8 +2688,8 @@ static void mdio_write(struct net_device
29628 if (!lp->mii)
29629 return;
29630
29631 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29632 - lp->a.write_bcr(ioaddr, 34, val);
29633 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29634 + lp->a->write_bcr(ioaddr, 34, val);
29635 }
29636
29637 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29638 @@ -2766,7 +2766,7 @@ static void pcnet32_check_media(struct n
29639 curr_link = mii_link_ok(&lp->mii_if);
29640 } else {
29641 ulong ioaddr = dev->base_addr; /* card base I/O address */
29642 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29643 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29644 }
29645 if (!curr_link) {
29646 if (prev_link || verbose) {
29647 @@ -2789,13 +2789,13 @@ static void pcnet32_check_media(struct n
29648 (ecmd.duplex == DUPLEX_FULL)
29649 ? "full" : "half");
29650 }
29651 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29652 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29653 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29654 if (lp->mii_if.full_duplex)
29655 bcr9 |= (1 << 0);
29656 else
29657 bcr9 &= ~(1 << 0);
29658 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29659 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29660 }
29661 } else {
29662 netif_info(lp, link, dev, "link up\n");
29663 diff -urNp linux-2.6.39.4/drivers/net/ppp_generic.c linux-2.6.39.4/drivers/net/ppp_generic.c
29664 --- linux-2.6.39.4/drivers/net/ppp_generic.c 2011-05-19 00:06:34.000000000 -0400
29665 +++ linux-2.6.39.4/drivers/net/ppp_generic.c 2011-08-05 19:44:37.000000000 -0400
29666 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29667 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29668 struct ppp_stats stats;
29669 struct ppp_comp_stats cstats;
29670 - char *vers;
29671
29672 switch (cmd) {
29673 case SIOCGPPPSTATS:
29674 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29675 break;
29676
29677 case SIOCGPPPVER:
29678 - vers = PPP_VERSION;
29679 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29680 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29681 break;
29682 err = 0;
29683 break;
29684 diff -urNp linux-2.6.39.4/drivers/net/r8169.c linux-2.6.39.4/drivers/net/r8169.c
29685 --- linux-2.6.39.4/drivers/net/r8169.c 2011-05-19 00:06:34.000000000 -0400
29686 +++ linux-2.6.39.4/drivers/net/r8169.c 2011-08-05 20:34:06.000000000 -0400
29687 @@ -552,12 +552,12 @@ struct rtl8169_private {
29688 struct mdio_ops {
29689 void (*write)(void __iomem *, int, int);
29690 int (*read)(void __iomem *, int);
29691 - } mdio_ops;
29692 + } __no_const mdio_ops;
29693
29694 struct pll_power_ops {
29695 void (*down)(struct rtl8169_private *);
29696 void (*up)(struct rtl8169_private *);
29697 - } pll_power_ops;
29698 + } __no_const pll_power_ops;
29699
29700 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29701 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29702 diff -urNp linux-2.6.39.4/drivers/net/tg3.h linux-2.6.39.4/drivers/net/tg3.h
29703 --- linux-2.6.39.4/drivers/net/tg3.h 2011-05-19 00:06:34.000000000 -0400
29704 +++ linux-2.6.39.4/drivers/net/tg3.h 2011-08-05 19:44:37.000000000 -0400
29705 @@ -131,6 +131,7 @@
29706 #define CHIPREV_ID_5750_A0 0x4000
29707 #define CHIPREV_ID_5750_A1 0x4001
29708 #define CHIPREV_ID_5750_A3 0x4003
29709 +#define CHIPREV_ID_5750_C1 0x4201
29710 #define CHIPREV_ID_5750_C2 0x4202
29711 #define CHIPREV_ID_5752_A0_HW 0x5000
29712 #define CHIPREV_ID_5752_A0 0x6000
29713 diff -urNp linux-2.6.39.4/drivers/net/tokenring/abyss.c linux-2.6.39.4/drivers/net/tokenring/abyss.c
29714 --- linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-05-19 00:06:34.000000000 -0400
29715 +++ linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-08-05 20:34:06.000000000 -0400
29716 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29717
29718 static int __init abyss_init (void)
29719 {
29720 - abyss_netdev_ops = tms380tr_netdev_ops;
29721 + pax_open_kernel();
29722 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29723
29724 - abyss_netdev_ops.ndo_open = abyss_open;
29725 - abyss_netdev_ops.ndo_stop = abyss_close;
29726 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29727 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29728 + pax_close_kernel();
29729
29730 return pci_register_driver(&abyss_driver);
29731 }
29732 diff -urNp linux-2.6.39.4/drivers/net/tokenring/madgemc.c linux-2.6.39.4/drivers/net/tokenring/madgemc.c
29733 --- linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-05-19 00:06:34.000000000 -0400
29734 +++ linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-08-05 20:34:06.000000000 -0400
29735 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29736
29737 static int __init madgemc_init (void)
29738 {
29739 - madgemc_netdev_ops = tms380tr_netdev_ops;
29740 - madgemc_netdev_ops.ndo_open = madgemc_open;
29741 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29742 + pax_open_kernel();
29743 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29744 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29745 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29746 + pax_close_kernel();
29747
29748 return mca_register_driver (&madgemc_driver);
29749 }
29750 diff -urNp linux-2.6.39.4/drivers/net/tokenring/proteon.c linux-2.6.39.4/drivers/net/tokenring/proteon.c
29751 --- linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-05-19 00:06:34.000000000 -0400
29752 +++ linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-08-05 20:34:06.000000000 -0400
29753 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29754 struct platform_device *pdev;
29755 int i, num = 0, err = 0;
29756
29757 - proteon_netdev_ops = tms380tr_netdev_ops;
29758 - proteon_netdev_ops.ndo_open = proteon_open;
29759 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29760 + pax_open_kernel();
29761 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29762 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29763 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29764 + pax_close_kernel();
29765
29766 err = platform_driver_register(&proteon_driver);
29767 if (err)
29768 diff -urNp linux-2.6.39.4/drivers/net/tokenring/skisa.c linux-2.6.39.4/drivers/net/tokenring/skisa.c
29769 --- linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-05-19 00:06:34.000000000 -0400
29770 +++ linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-08-05 20:34:06.000000000 -0400
29771 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29772 struct platform_device *pdev;
29773 int i, num = 0, err = 0;
29774
29775 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29776 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29777 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29778 + pax_open_kernel();
29779 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29780 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29781 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29782 + pax_close_kernel();
29783
29784 err = platform_driver_register(&sk_isa_driver);
29785 if (err)
29786 diff -urNp linux-2.6.39.4/drivers/net/tulip/de2104x.c linux-2.6.39.4/drivers/net/tulip/de2104x.c
29787 --- linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-05-19 00:06:34.000000000 -0400
29788 +++ linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-08-05 19:44:37.000000000 -0400
29789 @@ -1817,6 +1817,8 @@ static void __devinit de21041_get_srom_i
29790 struct de_srom_info_leaf *il;
29791 void *bufp;
29792
29793 + pax_track_stack();
29794 +
29795 /* download entire eeprom */
29796 for (i = 0; i < DE_EEPROM_WORDS; i++)
29797 ((__le16 *)ee_data)[i] =
29798 diff -urNp linux-2.6.39.4/drivers/net/tulip/de4x5.c linux-2.6.39.4/drivers/net/tulip/de4x5.c
29799 --- linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-05-19 00:06:34.000000000 -0400
29800 +++ linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-08-05 19:44:37.000000000 -0400
29801 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29802 for (i=0; i<ETH_ALEN; i++) {
29803 tmp.addr[i] = dev->dev_addr[i];
29804 }
29805 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29806 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29807 break;
29808
29809 case DE4X5_SET_HWADDR: /* Set the hardware address */
29810 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29811 spin_lock_irqsave(&lp->lock, flags);
29812 memcpy(&statbuf, &lp->pktStats, ioc->len);
29813 spin_unlock_irqrestore(&lp->lock, flags);
29814 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29815 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29816 return -EFAULT;
29817 break;
29818 }
29819 diff -urNp linux-2.6.39.4/drivers/net/usb/hso.c linux-2.6.39.4/drivers/net/usb/hso.c
29820 --- linux-2.6.39.4/drivers/net/usb/hso.c 2011-05-19 00:06:34.000000000 -0400
29821 +++ linux-2.6.39.4/drivers/net/usb/hso.c 2011-08-05 19:44:37.000000000 -0400
29822 @@ -71,7 +71,7 @@
29823 #include <asm/byteorder.h>
29824 #include <linux/serial_core.h>
29825 #include <linux/serial.h>
29826 -
29827 +#include <asm/local.h>
29828
29829 #define MOD_AUTHOR "Option Wireless"
29830 #define MOD_DESCRIPTION "USB High Speed Option driver"
29831 @@ -257,7 +257,7 @@ struct hso_serial {
29832
29833 /* from usb_serial_port */
29834 struct tty_struct *tty;
29835 - int open_count;
29836 + local_t open_count;
29837 spinlock_t serial_lock;
29838
29839 int (*write_data) (struct hso_serial *serial);
29840 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29841 struct urb *urb;
29842
29843 urb = serial->rx_urb[0];
29844 - if (serial->open_count > 0) {
29845 + if (local_read(&serial->open_count) > 0) {
29846 count = put_rxbuf_data(urb, serial);
29847 if (count == -1)
29848 return;
29849 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29850 DUMP1(urb->transfer_buffer, urb->actual_length);
29851
29852 /* Anyone listening? */
29853 - if (serial->open_count == 0)
29854 + if (local_read(&serial->open_count) == 0)
29855 return;
29856
29857 if (status == 0) {
29858 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29859 spin_unlock_irq(&serial->serial_lock);
29860
29861 /* check for port already opened, if not set the termios */
29862 - serial->open_count++;
29863 - if (serial->open_count == 1) {
29864 + if (local_inc_return(&serial->open_count) == 1) {
29865 serial->rx_state = RX_IDLE;
29866 /* Force default termio settings */
29867 _hso_serial_set_termios(tty, NULL);
29868 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29869 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29870 if (result) {
29871 hso_stop_serial_device(serial->parent);
29872 - serial->open_count--;
29873 + local_dec(&serial->open_count);
29874 kref_put(&serial->parent->ref, hso_serial_ref_free);
29875 }
29876 } else {
29877 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29878
29879 /* reset the rts and dtr */
29880 /* do the actual close */
29881 - serial->open_count--;
29882 + local_dec(&serial->open_count);
29883
29884 - if (serial->open_count <= 0) {
29885 - serial->open_count = 0;
29886 + if (local_read(&serial->open_count) <= 0) {
29887 + local_set(&serial->open_count, 0);
29888 spin_lock_irq(&serial->serial_lock);
29889 if (serial->tty == tty) {
29890 serial->tty->driver_data = NULL;
29891 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29892
29893 /* the actual setup */
29894 spin_lock_irqsave(&serial->serial_lock, flags);
29895 - if (serial->open_count)
29896 + if (local_read(&serial->open_count))
29897 _hso_serial_set_termios(tty, old);
29898 else
29899 tty->termios = old;
29900 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29901 D1("Pending read interrupt on port %d\n", i);
29902 spin_lock(&serial->serial_lock);
29903 if (serial->rx_state == RX_IDLE &&
29904 - serial->open_count > 0) {
29905 + local_read(&serial->open_count) > 0) {
29906 /* Setup and send a ctrl req read on
29907 * port i */
29908 if (!serial->rx_urb_filled[0]) {
29909 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
29910 /* Start all serial ports */
29911 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29912 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29913 - if (dev2ser(serial_table[i])->open_count) {
29914 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29915 result =
29916 hso_start_serial_device(serial_table[i], GFP_NOIO);
29917 hso_kick_transmit(dev2ser(serial_table[i]));
29918 diff -urNp linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29919 --- linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-19 00:06:34.000000000 -0400
29920 +++ linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-05 19:44:37.000000000 -0400
29921 @@ -631,8 +631,7 @@ vmxnet3_set_rss_indir(struct net_device
29922 * Return with error code if any of the queue indices
29923 * is out of range
29924 */
29925 - if (p->ring_index[i] < 0 ||
29926 - p->ring_index[i] >= adapter->num_rx_queues)
29927 + if (p->ring_index[i] >= adapter->num_rx_queues)
29928 return -EINVAL;
29929 }
29930
29931 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-config.h linux-2.6.39.4/drivers/net/vxge/vxge-config.h
29932 --- linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-05-19 00:06:34.000000000 -0400
29933 +++ linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-08-05 20:34:06.000000000 -0400
29934 @@ -508,7 +508,7 @@ struct vxge_hw_uld_cbs {
29935 void (*link_down)(struct __vxge_hw_device *devh);
29936 void (*crit_err)(struct __vxge_hw_device *devh,
29937 enum vxge_hw_event type, u64 ext_data);
29938 -};
29939 +} __no_const;
29940
29941 /*
29942 * struct __vxge_hw_blockpool_entry - Block private data structure
29943 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-main.c linux-2.6.39.4/drivers/net/vxge/vxge-main.c
29944 --- linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-05-19 00:06:34.000000000 -0400
29945 +++ linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-08-05 19:44:37.000000000 -0400
29946 @@ -97,6 +97,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29947 struct sk_buff *completed[NR_SKB_COMPLETED];
29948 int more;
29949
29950 + pax_track_stack();
29951 +
29952 do {
29953 more = 0;
29954 skb_ptr = completed;
29955 @@ -1927,6 +1929,8 @@ static enum vxge_hw_status vxge_rth_conf
29956 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29957 int index;
29958
29959 + pax_track_stack();
29960 +
29961 /*
29962 * Filling
29963 * - itable with bucket numbers
29964 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h
29965 --- linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-05-19 00:06:34.000000000 -0400
29966 +++ linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:34:06.000000000 -0400
29967 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29968 struct vxge_hw_mempool_dma *dma_object,
29969 u32 index,
29970 u32 is_last);
29971 -};
29972 +} __no_const;
29973
29974 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29975 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29976 diff -urNp linux-2.6.39.4/drivers/net/wan/cycx_x25.c linux-2.6.39.4/drivers/net/wan/cycx_x25.c
29977 --- linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-05-19 00:06:34.000000000 -0400
29978 +++ linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-08-05 19:44:37.000000000 -0400
29979 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29980 unsigned char hex[1024],
29981 * phex = hex;
29982
29983 + pax_track_stack();
29984 +
29985 if (len >= (sizeof(hex) / 2))
29986 len = (sizeof(hex) / 2) - 1;
29987
29988 diff -urNp linux-2.6.39.4/drivers/net/wan/hdlc_x25.c linux-2.6.39.4/drivers/net/wan/hdlc_x25.c
29989 --- linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-05-19 00:06:34.000000000 -0400
29990 +++ linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-08-05 20:34:06.000000000 -0400
29991 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29992
29993 static int x25_open(struct net_device *dev)
29994 {
29995 - struct lapb_register_struct cb;
29996 + static struct lapb_register_struct cb = {
29997 + .connect_confirmation = x25_connected,
29998 + .connect_indication = x25_connected,
29999 + .disconnect_confirmation = x25_disconnected,
30000 + .disconnect_indication = x25_disconnected,
30001 + .data_indication = x25_data_indication,
30002 + .data_transmit = x25_data_transmit
30003 + };
30004 int result;
30005
30006 - cb.connect_confirmation = x25_connected;
30007 - cb.connect_indication = x25_connected;
30008 - cb.disconnect_confirmation = x25_disconnected;
30009 - cb.disconnect_indication = x25_disconnected;
30010 - cb.data_indication = x25_data_indication;
30011 - cb.data_transmit = x25_data_transmit;
30012 -
30013 result = lapb_register(dev, &cb);
30014 if (result != LAPB_OK)
30015 return result;
30016 diff -urNp linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c
30017 --- linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-05-19 00:06:34.000000000 -0400
30018 +++ linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-05 19:44:37.000000000 -0400
30019 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
30020 int do_autopm = 1;
30021 DECLARE_COMPLETION_ONSTACK(notif_completion);
30022
30023 + pax_track_stack();
30024 +
30025 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30026 i2400m, ack, ack_size);
30027 BUG_ON(_ack == i2400m->bm_ack_buf);
30028 diff -urNp linux-2.6.39.4/drivers/net/wireless/airo.c linux-2.6.39.4/drivers/net/wireless/airo.c
30029 --- linux-2.6.39.4/drivers/net/wireless/airo.c 2011-05-19 00:06:34.000000000 -0400
30030 +++ linux-2.6.39.4/drivers/net/wireless/airo.c 2011-08-05 19:44:37.000000000 -0400
30031 @@ -3001,6 +3001,8 @@ static void airo_process_scan_results (s
30032 BSSListElement * loop_net;
30033 BSSListElement * tmp_net;
30034
30035 + pax_track_stack();
30036 +
30037 /* Blow away current list of scan results */
30038 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30039 list_move_tail (&loop_net->list, &ai->network_free_list);
30040 @@ -3792,6 +3794,8 @@ static u16 setup_card(struct airo_info *
30041 WepKeyRid wkr;
30042 int rc;
30043
30044 + pax_track_stack();
30045 +
30046 memset( &mySsid, 0, sizeof( mySsid ) );
30047 kfree (ai->flash);
30048 ai->flash = NULL;
30049 @@ -4760,6 +4764,8 @@ static int proc_stats_rid_open( struct i
30050 __le32 *vals = stats.vals;
30051 int len;
30052
30053 + pax_track_stack();
30054 +
30055 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30056 return -ENOMEM;
30057 data = file->private_data;
30058 @@ -5483,6 +5489,8 @@ static int proc_BSSList_open( struct ino
30059 /* If doLoseSync is not 1, we won't do a Lose Sync */
30060 int doLoseSync = -1;
30061
30062 + pax_track_stack();
30063 +
30064 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30065 return -ENOMEM;
30066 data = file->private_data;
30067 @@ -7190,6 +7198,8 @@ static int airo_get_aplist(struct net_de
30068 int i;
30069 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30070
30071 + pax_track_stack();
30072 +
30073 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30074 if (!qual)
30075 return -ENOMEM;
30076 @@ -7750,6 +7760,8 @@ static void airo_read_wireless_stats(str
30077 CapabilityRid cap_rid;
30078 __le32 *vals = stats_rid.vals;
30079
30080 + pax_track_stack();
30081 +
30082 /* Get stats out of the card */
30083 clear_bit(JOB_WSTATS, &local->jobs);
30084 if (local->power.event) {
30085 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c
30086 --- linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-05-19 00:06:34.000000000 -0400
30087 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-05 19:44:37.000000000 -0400
30088 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30089 unsigned int v;
30090 u64 tsf;
30091
30092 + pax_track_stack();
30093 +
30094 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30095 len += snprintf(buf+len, sizeof(buf)-len,
30096 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30097 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30098 unsigned int len = 0;
30099 unsigned int i;
30100
30101 + pax_track_stack();
30102 +
30103 len += snprintf(buf+len, sizeof(buf)-len,
30104 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30105
30106 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30107 unsigned int i;
30108 unsigned int v;
30109
30110 + pax_track_stack();
30111 +
30112 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30113 sc->ah->ah_ant_mode);
30114 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30115 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30116 unsigned int len = 0;
30117 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30118
30119 + pax_track_stack();
30120 +
30121 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30122 sc->bssidmask);
30123 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30124 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30125 unsigned int len = 0;
30126 int i;
30127
30128 + pax_track_stack();
30129 +
30130 len += snprintf(buf+len, sizeof(buf)-len,
30131 "RX\n---------------------\n");
30132 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30133 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30134 char buf[700];
30135 unsigned int len = 0;
30136
30137 + pax_track_stack();
30138 +
30139 len += snprintf(buf+len, sizeof(buf)-len,
30140 "HW has PHY error counters:\t%s\n",
30141 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30142 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30143 struct ath5k_buf *bf, *bf0;
30144 int i, n;
30145
30146 + pax_track_stack();
30147 +
30148 len += snprintf(buf+len, sizeof(buf)-len,
30149 "available txbuffers: %d\n", sc->txbuf_len);
30150
30151 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30152 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-19 00:06:34.000000000 -0400
30153 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-05 19:44:37.000000000 -0400
30154 @@ -734,6 +734,8 @@ static void ar9003_hw_tx_iq_cal(struct a
30155 s32 i, j, ip, im, nmeasurement;
30156 u8 nchains = get_streams(common->tx_chainmask);
30157
30158 + pax_track_stack();
30159 +
30160 for (ip = 0; ip < MPASS; ip++) {
30161 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
30162 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
30163 @@ -856,6 +858,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30164 int i, ip, im, j;
30165 int nmeasurement;
30166
30167 + pax_track_stack();
30168 +
30169 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30170 if (ah->txchainmask & (1 << i))
30171 num_chains++;
30172 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30173 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-19 00:06:34.000000000 -0400
30174 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-05 19:44:37.000000000 -0400
30175 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30176 int theta_low_bin = 0;
30177 int i;
30178
30179 + pax_track_stack();
30180 +
30181 /* disregard any bin that contains <= 16 samples */
30182 thresh_accum_cnt = 16;
30183 scale_factor = 5;
30184 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c
30185 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-05-19 00:06:34.000000000 -0400
30186 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-05 19:44:37.000000000 -0400
30187 @@ -335,6 +335,8 @@ static ssize_t read_file_interrupt(struc
30188 char buf[512];
30189 unsigned int len = 0;
30190
30191 + pax_track_stack();
30192 +
30193 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30194 len += snprintf(buf + len, sizeof(buf) - len,
30195 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30196 @@ -422,6 +424,8 @@ static ssize_t read_file_wiphy(struct fi
30197 u8 addr[ETH_ALEN];
30198 u32 tmp;
30199
30200 + pax_track_stack();
30201 +
30202 len += snprintf(buf + len, sizeof(buf) - len,
30203 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30204 wiphy_name(sc->hw->wiphy),
30205 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c
30206 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-19 00:06:34.000000000 -0400
30207 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-08-05 20:34:06.000000000 -0400
30208 @@ -737,6 +737,8 @@ static ssize_t read_file_tgt_stats(struc
30209 unsigned int len = 0;
30210 int ret = 0;
30211
30212 + pax_track_stack();
30213 +
30214 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30215
30216 WMI_CMD(WMI_TGT_STATS_CMDID);
30217 @@ -782,6 +784,8 @@ static ssize_t read_file_xmit(struct fil
30218 char buf[512];
30219 unsigned int len = 0;
30220
30221 + pax_track_stack();
30222 +
30223 len += snprintf(buf + len, sizeof(buf) - len,
30224 "%20s : %10u\n", "Buffers queued",
30225 priv->debug.tx_stats.buf_queued);
30226 @@ -831,6 +835,8 @@ static ssize_t read_file_recv(struct fil
30227 char buf[512];
30228 unsigned int len = 0;
30229
30230 + pax_track_stack();
30231 +
30232 len += snprintf(buf + len, sizeof(buf) - len,
30233 "%20s : %10u\n", "SKBs allocated",
30234 priv->debug.rx_stats.skb_allocated);
30235 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h
30236 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-05-19 00:06:34.000000000 -0400
30237 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-05 20:34:06.000000000 -0400
30238 @@ -592,7 +592,7 @@ struct ath_hw_private_ops {
30239
30240 /* ANI */
30241 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30242 -};
30243 +} __no_const;
30244
30245 /**
30246 * struct ath_hw_ops - callbacks used by hardware code and driver code
30247 @@ -642,7 +642,7 @@ struct ath_hw_ops {
30248 u32 burstDuration);
30249 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
30250 u32 vmf);
30251 -};
30252 +} __no_const;
30253
30254 struct ath_nf_limits {
30255 s16 max;
30256 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c
30257 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-19 00:06:34.000000000 -0400
30258 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-05 19:44:37.000000000 -0400
30259 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30260 int err;
30261 DECLARE_SSID_BUF(ssid);
30262
30263 + pax_track_stack();
30264 +
30265 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30266
30267 if (ssid_len)
30268 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30269 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30270 int err;
30271
30272 + pax_track_stack();
30273 +
30274 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30275 idx, keylen, len);
30276
30277 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30278 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-19 00:06:34.000000000 -0400
30279 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-05 19:44:37.000000000 -0400
30280 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30281 unsigned long flags;
30282 DECLARE_SSID_BUF(ssid);
30283
30284 + pax_track_stack();
30285 +
30286 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30287 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30288 print_ssid(ssid, info_element->data, info_element->len),
30289 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30290 --- linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-05-19 00:06:34.000000000 -0400
30291 +++ linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-05 20:34:06.000000000 -0400
30292 @@ -3958,7 +3958,9 @@ static int iwl3945_pci_probe(struct pci_
30293 */
30294 if (iwl3945_mod_params.disable_hw_scan) {
30295 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30296 - iwl3945_hw_ops.hw_scan = NULL;
30297 + pax_open_kernel();
30298 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30299 + pax_close_kernel();
30300 }
30301
30302 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30303 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c
30304 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-06-25 12:55:22.000000000 -0400
30305 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:34:06.000000000 -0400
30306 @@ -3974,7 +3974,9 @@ static int iwl_pci_probe(struct pci_dev
30307 if (cfg->mod_params->disable_hw_scan) {
30308 dev_printk(KERN_DEBUG, &(pdev->dev),
30309 "sw scan support is deprecated\n");
30310 - iwlagn_hw_ops.hw_scan = NULL;
30311 + pax_open_kernel();
30312 + *(void **)&iwlagn_hw_ops.hw_scan = NULL;
30313 + pax_close_kernel();
30314 }
30315
30316 hw = iwl_alloc_all(cfg);
30317 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30318 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-19 00:06:34.000000000 -0400
30319 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-05 19:44:37.000000000 -0400
30320 @@ -883,6 +883,8 @@ static void rs_tx_status(void *priv_r, s
30321 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30322 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30323
30324 + pax_track_stack();
30325 +
30326 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30327
30328 /* Treat uninitialized rate scaling data same as non-existing. */
30329 @@ -2894,6 +2896,8 @@ static void rs_fill_link_cmd(struct iwl_
30330 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30331 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30332
30333 + pax_track_stack();
30334 +
30335 /* Override starting rate (index 0) if needed for debug purposes */
30336 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30337
30338 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30339 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-19 00:06:34.000000000 -0400
30340 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-05 19:44:37.000000000 -0400
30341 @@ -549,6 +549,8 @@ static ssize_t iwl_dbgfs_status_read(str
30342 int pos = 0;
30343 const size_t bufsz = sizeof(buf);
30344
30345 + pax_track_stack();
30346 +
30347 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30348 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30349 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30350 @@ -681,6 +683,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30351 char buf[256 * NUM_IWL_RXON_CTX];
30352 const size_t bufsz = sizeof(buf);
30353
30354 + pax_track_stack();
30355 +
30356 for_each_context(priv, ctx) {
30357 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30358 ctx->ctxid);
30359 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30360 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-19 00:06:34.000000000 -0400
30361 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-05 19:44:37.000000000 -0400
30362 @@ -68,8 +68,8 @@ do {
30363 } while (0)
30364
30365 #else
30366 -#define IWL_DEBUG(__priv, level, fmt, args...)
30367 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30368 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30369 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30370 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30371 const void *p, u32 len)
30372 {}
30373 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30374 --- linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-19 00:06:34.000000000 -0400
30375 +++ linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-05 19:44:37.000000000 -0400
30376 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30377 int buf_len = 512;
30378 size_t len = 0;
30379
30380 + pax_track_stack();
30381 +
30382 if (*ppos != 0)
30383 return 0;
30384 if (count < sizeof(buf))
30385 diff -urNp linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c
30386 --- linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-05-19 00:06:34.000000000 -0400
30387 +++ linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-05 20:34:06.000000000 -0400
30388 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30389 return -EINVAL;
30390
30391 if (fake_hw_scan) {
30392 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30393 - mac80211_hwsim_ops.sw_scan_start = NULL;
30394 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30395 + pax_open_kernel();
30396 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30397 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30398 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30399 + pax_close_kernel();
30400 }
30401
30402 spin_lock_init(&hwsim_radio_lock);
30403 diff -urNp linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c
30404 --- linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-05-19 00:06:34.000000000 -0400
30405 +++ linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-08-05 19:44:37.000000000 -0400
30406 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30407
30408 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30409
30410 - if (rts_threshold < 0 || rts_threshold > 2347)
30411 + if (rts_threshold > 2347)
30412 rts_threshold = 2347;
30413
30414 tmp = cpu_to_le32(rts_threshold);
30415 diff -urNp linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30416 --- linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-19 00:06:34.000000000 -0400
30417 +++ linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-05 19:44:37.000000000 -0400
30418 @@ -827,6 +827,8 @@ static bool _rtl92c_phy_sw_chnl_step_by_
30419 u8 rfpath;
30420 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30421
30422 + pax_track_stack();
30423 +
30424 precommoncmdcnt = 0;
30425 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30426 MAX_PRECMD_CNT,
30427 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h
30428 --- linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-05-19 00:06:34.000000000 -0400
30429 +++ linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-05 20:34:06.000000000 -0400
30430 @@ -260,7 +260,7 @@ struct wl1251_if_operations {
30431 void (*reset)(struct wl1251 *wl);
30432 void (*enable_irq)(struct wl1251 *wl);
30433 void (*disable_irq)(struct wl1251 *wl);
30434 -};
30435 +} __no_const;
30436
30437 struct wl1251 {
30438 struct ieee80211_hw *hw;
30439 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c
30440 --- linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-05-19 00:06:34.000000000 -0400
30441 +++ linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-08-05 19:44:37.000000000 -0400
30442 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30443 u32 chunk_len;
30444 int i;
30445
30446 + pax_track_stack();
30447 +
30448 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30449
30450 spi_message_init(&m);
30451 diff -urNp linux-2.6.39.4/drivers/oprofile/buffer_sync.c linux-2.6.39.4/drivers/oprofile/buffer_sync.c
30452 --- linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-06-25 12:55:22.000000000 -0400
30453 +++ linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-08-05 19:44:37.000000000 -0400
30454 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30455 if (cookie == NO_COOKIE)
30456 offset = pc;
30457 if (cookie == INVALID_COOKIE) {
30458 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30459 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30460 offset = pc;
30461 }
30462 if (cookie != last_cookie) {
30463 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30464 /* add userspace sample */
30465
30466 if (!mm) {
30467 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30468 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30469 return 0;
30470 }
30471
30472 cookie = lookup_dcookie(mm, s->eip, &offset);
30473
30474 if (cookie == INVALID_COOKIE) {
30475 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30476 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30477 return 0;
30478 }
30479
30480 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30481 /* ignore backtraces if failed to add a sample */
30482 if (state == sb_bt_start) {
30483 state = sb_bt_ignore;
30484 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30485 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30486 }
30487 }
30488 release_mm(mm);
30489 diff -urNp linux-2.6.39.4/drivers/oprofile/event_buffer.c linux-2.6.39.4/drivers/oprofile/event_buffer.c
30490 --- linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-05-19 00:06:34.000000000 -0400
30491 +++ linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-08-05 19:44:37.000000000 -0400
30492 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30493 }
30494
30495 if (buffer_pos == buffer_size) {
30496 - atomic_inc(&oprofile_stats.event_lost_overflow);
30497 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30498 return;
30499 }
30500
30501 diff -urNp linux-2.6.39.4/drivers/oprofile/oprof.c linux-2.6.39.4/drivers/oprofile/oprof.c
30502 --- linux-2.6.39.4/drivers/oprofile/oprof.c 2011-05-19 00:06:34.000000000 -0400
30503 +++ linux-2.6.39.4/drivers/oprofile/oprof.c 2011-08-05 19:44:37.000000000 -0400
30504 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30505 if (oprofile_ops.switch_events())
30506 return;
30507
30508 - atomic_inc(&oprofile_stats.multiplex_counter);
30509 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30510 start_switch_worker();
30511 }
30512
30513 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofilefs.c linux-2.6.39.4/drivers/oprofile/oprofilefs.c
30514 --- linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-05-19 00:06:34.000000000 -0400
30515 +++ linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-08-05 19:44:37.000000000 -0400
30516 @@ -186,7 +186,7 @@ static const struct file_operations atom
30517
30518
30519 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30520 - char const *name, atomic_t *val)
30521 + char const *name, atomic_unchecked_t *val)
30522 {
30523 return __oprofilefs_create_file(sb, root, name,
30524 &atomic_ro_fops, 0444, val);
30525 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.c linux-2.6.39.4/drivers/oprofile/oprofile_stats.c
30526 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-05-19 00:06:34.000000000 -0400
30527 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-08-05 19:44:37.000000000 -0400
30528 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30529 cpu_buf->sample_invalid_eip = 0;
30530 }
30531
30532 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30533 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30534 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30535 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30536 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30537 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30538 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30539 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30540 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30541 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30542 }
30543
30544
30545 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.h linux-2.6.39.4/drivers/oprofile/oprofile_stats.h
30546 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-05-19 00:06:34.000000000 -0400
30547 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-08-05 19:44:37.000000000 -0400
30548 @@ -13,11 +13,11 @@
30549 #include <asm/atomic.h>
30550
30551 struct oprofile_stat_struct {
30552 - atomic_t sample_lost_no_mm;
30553 - atomic_t sample_lost_no_mapping;
30554 - atomic_t bt_lost_no_mapping;
30555 - atomic_t event_lost_overflow;
30556 - atomic_t multiplex_counter;
30557 + atomic_unchecked_t sample_lost_no_mm;
30558 + atomic_unchecked_t sample_lost_no_mapping;
30559 + atomic_unchecked_t bt_lost_no_mapping;
30560 + atomic_unchecked_t event_lost_overflow;
30561 + atomic_unchecked_t multiplex_counter;
30562 };
30563
30564 extern struct oprofile_stat_struct oprofile_stats;
30565 diff -urNp linux-2.6.39.4/drivers/parport/procfs.c linux-2.6.39.4/drivers/parport/procfs.c
30566 --- linux-2.6.39.4/drivers/parport/procfs.c 2011-05-19 00:06:34.000000000 -0400
30567 +++ linux-2.6.39.4/drivers/parport/procfs.c 2011-08-05 19:44:37.000000000 -0400
30568 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30569
30570 *ppos += len;
30571
30572 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30573 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30574 }
30575
30576 #ifdef CONFIG_PARPORT_1284
30577 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30578
30579 *ppos += len;
30580
30581 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30582 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30583 }
30584 #endif /* IEEE1284.3 support. */
30585
30586 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h
30587 --- linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-05-19 00:06:34.000000000 -0400
30588 +++ linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:34:06.000000000 -0400
30589 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30590 int (*hardware_test) (struct slot* slot, u32 value);
30591 u8 (*get_power) (struct slot* slot);
30592 int (*set_power) (struct slot* slot, int value);
30593 -};
30594 +} __no_const;
30595
30596 struct cpci_hp_controller {
30597 unsigned int irq;
30598 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c
30599 --- linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-19 00:06:34.000000000 -0400
30600 +++ linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-05 19:44:37.000000000 -0400
30601 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30602
30603 void compaq_nvram_init (void __iomem *rom_start)
30604 {
30605 +
30606 +#ifndef CONFIG_PAX_KERNEXEC
30607 if (rom_start) {
30608 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30609 }
30610 +#endif
30611 +
30612 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30613
30614 /* initialize our int15 lock */
30615 diff -urNp linux-2.6.39.4/drivers/pci/pcie/aspm.c linux-2.6.39.4/drivers/pci/pcie/aspm.c
30616 --- linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-05-19 00:06:34.000000000 -0400
30617 +++ linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-08-05 19:44:37.000000000 -0400
30618 @@ -27,9 +27,9 @@
30619 #define MODULE_PARAM_PREFIX "pcie_aspm."
30620
30621 /* Note: those are not register definitions */
30622 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30623 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30624 -#define ASPM_STATE_L1 (4) /* L1 state */
30625 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30626 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30627 +#define ASPM_STATE_L1 (4U) /* L1 state */
30628 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30629 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30630
30631 diff -urNp linux-2.6.39.4/drivers/pci/probe.c linux-2.6.39.4/drivers/pci/probe.c
30632 --- linux-2.6.39.4/drivers/pci/probe.c 2011-05-19 00:06:34.000000000 -0400
30633 +++ linux-2.6.39.4/drivers/pci/probe.c 2011-08-05 20:34:06.000000000 -0400
30634 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
30635 return ret;
30636 }
30637
30638 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
30639 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
30640 struct device_attribute *attr,
30641 char *buf)
30642 {
30643 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
30644 }
30645
30646 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
30647 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
30648 struct device_attribute *attr,
30649 char *buf)
30650 {
30651 @@ -165,7 +165,7 @@ int __pci_read_base(struct pci_dev *dev,
30652 u32 l, sz, mask;
30653 u16 orig_cmd;
30654
30655 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30656 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30657
30658 if (!dev->mmio_always_on) {
30659 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30660 diff -urNp linux-2.6.39.4/drivers/pci/proc.c linux-2.6.39.4/drivers/pci/proc.c
30661 --- linux-2.6.39.4/drivers/pci/proc.c 2011-05-19 00:06:34.000000000 -0400
30662 +++ linux-2.6.39.4/drivers/pci/proc.c 2011-08-05 19:44:37.000000000 -0400
30663 @@ -476,7 +476,16 @@ static const struct file_operations proc
30664 static int __init pci_proc_init(void)
30665 {
30666 struct pci_dev *dev = NULL;
30667 +
30668 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30669 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30670 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30671 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30672 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30673 +#endif
30674 +#else
30675 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30676 +#endif
30677 proc_create("devices", 0, proc_bus_pci_dir,
30678 &proc_bus_pci_dev_operations);
30679 proc_initialized = 1;
30680 diff -urNp linux-2.6.39.4/drivers/pci/xen-pcifront.c linux-2.6.39.4/drivers/pci/xen-pcifront.c
30681 --- linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-05-19 00:06:34.000000000 -0400
30682 +++ linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-08-05 20:34:06.000000000 -0400
30683 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30684 struct pcifront_sd *sd = bus->sysdata;
30685 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30686
30687 + pax_track_stack();
30688 +
30689 if (verbose_request)
30690 dev_info(&pdev->xdev->dev,
30691 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30692 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30693 struct pcifront_sd *sd = bus->sysdata;
30694 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30695
30696 + pax_track_stack();
30697 +
30698 if (verbose_request)
30699 dev_info(&pdev->xdev->dev,
30700 "write dev=%04x:%02x:%02x.%01x - "
30701 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30702 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30703 struct msi_desc *entry;
30704
30705 + pax_track_stack();
30706 +
30707 if (nvec > SH_INFO_MAX_VEC) {
30708 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30709 " Increase SH_INFO_MAX_VEC.\n", nvec);
30710 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30711 struct pcifront_sd *sd = dev->bus->sysdata;
30712 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30713
30714 + pax_track_stack();
30715 +
30716 err = do_pci_op(pdev, &op);
30717
30718 /* What should do for error ? */
30719 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30720 struct pcifront_sd *sd = dev->bus->sysdata;
30721 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30722
30723 + pax_track_stack();
30724 +
30725 err = do_pci_op(pdev, &op);
30726 if (likely(!err)) {
30727 vector[0] = op.value;
30728 diff -urNp linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c
30729 --- linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-05-19 00:06:34.000000000 -0400
30730 +++ linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:34:06.000000000 -0400
30731 @@ -2109,7 +2109,7 @@ static int hotkey_mask_get(void)
30732 return 0;
30733 }
30734
30735 -void static hotkey_mask_warn_incomplete_mask(void)
30736 +static void hotkey_mask_warn_incomplete_mask(void)
30737 {
30738 /* log only what the user can fix... */
30739 const u32 wantedmask = hotkey_driver_mask &
30740 diff -urNp linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c
30741 --- linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-05-19 00:06:34.000000000 -0400
30742 +++ linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-05 19:44:37.000000000 -0400
30743 @@ -59,7 +59,7 @@ do { \
30744 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30745 } while(0)
30746
30747 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30748 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30749 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30750
30751 /*
30752 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30753
30754 cpu = get_cpu();
30755 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30756 +
30757 + pax_open_kernel();
30758 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30759 + pax_close_kernel();
30760
30761 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30762 spin_lock_irqsave(&pnp_bios_lock, flags);
30763 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30764 :"memory");
30765 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30766
30767 + pax_open_kernel();
30768 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30769 + pax_close_kernel();
30770 +
30771 put_cpu();
30772
30773 /* If we get here and this is set then the PnP BIOS faulted on us. */
30774 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30775 return status;
30776 }
30777
30778 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30779 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30780 {
30781 int i;
30782
30783 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30784 pnp_bios_callpoint.offset = header->fields.pm16offset;
30785 pnp_bios_callpoint.segment = PNP_CS16;
30786
30787 + pax_open_kernel();
30788 +
30789 for_each_possible_cpu(i) {
30790 struct desc_struct *gdt = get_cpu_gdt_table(i);
30791 if (!gdt)
30792 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30793 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30794 (unsigned long)__va(header->fields.pm16dseg));
30795 }
30796 +
30797 + pax_close_kernel();
30798 }
30799 diff -urNp linux-2.6.39.4/drivers/pnp/resource.c linux-2.6.39.4/drivers/pnp/resource.c
30800 --- linux-2.6.39.4/drivers/pnp/resource.c 2011-05-19 00:06:34.000000000 -0400
30801 +++ linux-2.6.39.4/drivers/pnp/resource.c 2011-08-05 19:44:37.000000000 -0400
30802 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30803 return 1;
30804
30805 /* check if the resource is valid */
30806 - if (*irq < 0 || *irq > 15)
30807 + if (*irq > 15)
30808 return 0;
30809
30810 /* check if the resource is reserved */
30811 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30812 return 1;
30813
30814 /* check if the resource is valid */
30815 - if (*dma < 0 || *dma == 4 || *dma > 7)
30816 + if (*dma == 4 || *dma > 7)
30817 return 0;
30818
30819 /* check if the resource is reserved */
30820 diff -urNp linux-2.6.39.4/drivers/power/bq27x00_battery.c linux-2.6.39.4/drivers/power/bq27x00_battery.c
30821 --- linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-05-19 00:06:34.000000000 -0400
30822 +++ linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-08-05 20:34:06.000000000 -0400
30823 @@ -66,7 +66,7 @@
30824 struct bq27x00_device_info;
30825 struct bq27x00_access_methods {
30826 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30827 -};
30828 +} __no_const;
30829
30830 enum bq27x00_chip { BQ27000, BQ27500 };
30831
30832 diff -urNp linux-2.6.39.4/drivers/regulator/max8660.c linux-2.6.39.4/drivers/regulator/max8660.c
30833 --- linux-2.6.39.4/drivers/regulator/max8660.c 2011-05-19 00:06:34.000000000 -0400
30834 +++ linux-2.6.39.4/drivers/regulator/max8660.c 2011-08-05 20:34:06.000000000 -0400
30835 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30836 max8660->shadow_regs[MAX8660_OVER1] = 5;
30837 } else {
30838 /* Otherwise devices can be toggled via software */
30839 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30840 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30841 + pax_open_kernel();
30842 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30843 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30844 + pax_close_kernel();
30845 }
30846
30847 /*
30848 diff -urNp linux-2.6.39.4/drivers/regulator/mc13892-regulator.c linux-2.6.39.4/drivers/regulator/mc13892-regulator.c
30849 --- linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-05-19 00:06:34.000000000 -0400
30850 +++ linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-08-05 20:34:06.000000000 -0400
30851 @@ -560,10 +560,12 @@ static int __devinit mc13892_regulator_p
30852 }
30853 mc13xxx_unlock(mc13892);
30854
30855 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30856 + pax_open_kernel();
30857 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30858 = mc13892_vcam_set_mode;
30859 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30860 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30861 = mc13892_vcam_get_mode;
30862 + pax_close_kernel();
30863 for (i = 0; i < pdata->num_regulators; i++) {
30864 init_data = &pdata->regulators[i];
30865 priv->regulators[i] = regulator_register(
30866 diff -urNp linux-2.6.39.4/drivers/rtc/rtc-dev.c linux-2.6.39.4/drivers/rtc/rtc-dev.c
30867 --- linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-05-19 00:06:34.000000000 -0400
30868 +++ linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-08-05 19:44:37.000000000 -0400
30869 @@ -14,6 +14,7 @@
30870 #include <linux/module.h>
30871 #include <linux/rtc.h>
30872 #include <linux/sched.h>
30873 +#include <linux/grsecurity.h>
30874 #include "rtc-core.h"
30875
30876 static dev_t rtc_devt;
30877 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30878 if (copy_from_user(&tm, uarg, sizeof(tm)))
30879 return -EFAULT;
30880
30881 + gr_log_timechange();
30882 +
30883 return rtc_set_time(rtc, &tm);
30884
30885 case RTC_PIE_ON:
30886 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h
30887 --- linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-05-19 00:06:34.000000000 -0400
30888 +++ linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:34:06.000000000 -0400
30889 @@ -492,7 +492,7 @@ struct adapter_ops
30890 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30891 /* Administrative operations */
30892 int (*adapter_comm)(struct aac_dev * dev, int comm);
30893 -};
30894 +} __no_const;
30895
30896 /*
30897 * Define which interrupt handler needs to be installed
30898 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c
30899 --- linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-05-19 00:06:34.000000000 -0400
30900 +++ linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-08-05 19:44:37.000000000 -0400
30901 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30902 u32 actual_fibsize64, actual_fibsize = 0;
30903 int i;
30904
30905 + pax_track_stack();
30906
30907 if (dev->in_reset) {
30908 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30909 diff -urNp linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c
30910 --- linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-19 00:06:34.000000000 -0400
30911 +++ linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-08-05 19:44:37.000000000 -0400
30912 @@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(stru
30913 flash_error_table[i].reason);
30914 }
30915
30916 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
30917 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
30918 asd_show_update_bios, asd_store_update_bios);
30919
30920 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
30921 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfad.c linux-2.6.39.4/drivers/scsi/bfa/bfad.c
30922 --- linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-05-19 00:06:34.000000000 -0400
30923 +++ linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-08-05 19:44:37.000000000 -0400
30924 @@ -1027,6 +1027,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30925 struct bfad_vport_s *vport, *vport_new;
30926 struct bfa_fcs_driver_info_s driver_info;
30927
30928 + pax_track_stack();
30929 +
30930 /* Fill the driver_info info to fcs*/
30931 memset(&driver_info, 0, sizeof(driver_info));
30932 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30933 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c
30934 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-19 00:06:34.000000000 -0400
30935 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-05 19:44:37.000000000 -0400
30936 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30937 u16 len, count;
30938 u16 templen;
30939
30940 + pax_track_stack();
30941 +
30942 /*
30943 * get hba attributes
30944 */
30945 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30946 u8 count = 0;
30947 u16 templen;
30948
30949 + pax_track_stack();
30950 +
30951 /*
30952 * get port attributes
30953 */
30954 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c
30955 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-19 00:06:34.000000000 -0400
30956 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-05 19:44:37.000000000 -0400
30957 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30958 struct fc_rpsc_speed_info_s speeds;
30959 struct bfa_port_attr_s pport_attr;
30960
30961 + pax_track_stack();
30962 +
30963 bfa_trc(port->fcs, rx_fchs->s_id);
30964 bfa_trc(port->fcs, rx_fchs->d_id);
30965
30966 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa.h linux-2.6.39.4/drivers/scsi/bfa/bfa.h
30967 --- linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-05-19 00:06:34.000000000 -0400
30968 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-08-05 20:34:06.000000000 -0400
30969 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30970 u32 *nvecs, u32 *maxvec);
30971 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30972 u32 *end);
30973 -};
30974 +} __no_const;
30975 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30976
30977 struct bfa_iocfc_s {
30978 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h
30979 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-05-19 00:06:34.000000000 -0400
30980 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:34:06.000000000 -0400
30981 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30982 bfa_ioc_disable_cbfn_t disable_cbfn;
30983 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30984 bfa_ioc_reset_cbfn_t reset_cbfn;
30985 -};
30986 +} __no_const;
30987
30988 /*
30989 * Heartbeat failure notification queue element.
30990 @@ -267,7 +267,7 @@ struct bfa_ioc_hwif_s {
30991 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30992 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30993 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30994 -};
30995 +} __no_const;
30996
30997 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30998 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30999 diff -urNp linux-2.6.39.4/drivers/scsi/BusLogic.c linux-2.6.39.4/drivers/scsi/BusLogic.c
31000 --- linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-05-19 00:06:34.000000000 -0400
31001 +++ linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-08-05 19:44:37.000000000 -0400
31002 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
31003 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
31004 *PrototypeHostAdapter)
31005 {
31006 + pax_track_stack();
31007 +
31008 /*
31009 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
31010 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
31011 diff -urNp linux-2.6.39.4/drivers/scsi/dpt_i2o.c linux-2.6.39.4/drivers/scsi/dpt_i2o.c
31012 --- linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-05-19 00:06:34.000000000 -0400
31013 +++ linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-08-05 19:44:37.000000000 -0400
31014 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
31015 dma_addr_t addr;
31016 ulong flags = 0;
31017
31018 + pax_track_stack();
31019 +
31020 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
31021 // get user msg size in u32s
31022 if(get_user(size, &user_msg[0])){
31023 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31024 s32 rcode;
31025 dma_addr_t addr;
31026
31027 + pax_track_stack();
31028 +
31029 memset(msg, 0 , sizeof(msg));
31030 len = scsi_bufflen(cmd);
31031 direction = 0x00000000;
31032 diff -urNp linux-2.6.39.4/drivers/scsi/eata.c linux-2.6.39.4/drivers/scsi/eata.c
31033 --- linux-2.6.39.4/drivers/scsi/eata.c 2011-05-19 00:06:34.000000000 -0400
31034 +++ linux-2.6.39.4/drivers/scsi/eata.c 2011-08-05 19:44:37.000000000 -0400
31035 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31036 struct hostdata *ha;
31037 char name[16];
31038
31039 + pax_track_stack();
31040 +
31041 sprintf(name, "%s%d", driver_name, j);
31042
31043 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31044 diff -urNp linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c
31045 --- linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-19 00:06:34.000000000 -0400
31046 +++ linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-05 20:34:06.000000000 -0400
31047 @@ -2458,6 +2458,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31048 } buf;
31049 int rc;
31050
31051 + pax_track_stack();
31052 +
31053 fiph = (struct fip_header *)skb->data;
31054 sub = fiph->fip_subcode;
31055
31056 diff -urNp linux-2.6.39.4/drivers/scsi/gdth.c linux-2.6.39.4/drivers/scsi/gdth.c
31057 --- linux-2.6.39.4/drivers/scsi/gdth.c 2011-05-19 00:06:34.000000000 -0400
31058 +++ linux-2.6.39.4/drivers/scsi/gdth.c 2011-08-05 19:44:37.000000000 -0400
31059 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31060 unsigned long flags;
31061 gdth_ha_str *ha;
31062
31063 + pax_track_stack();
31064 +
31065 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31066 return -EFAULT;
31067 ha = gdth_find_ha(ldrv.ionode);
31068 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31069 gdth_ha_str *ha;
31070 int rval;
31071
31072 + pax_track_stack();
31073 +
31074 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31075 res.number >= MAX_HDRIVES)
31076 return -EFAULT;
31077 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31078 gdth_ha_str *ha;
31079 int rval;
31080
31081 + pax_track_stack();
31082 +
31083 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31084 return -EFAULT;
31085 ha = gdth_find_ha(gen.ionode);
31086 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31087 int i;
31088 gdth_cmd_str gdtcmd;
31089 char cmnd[MAX_COMMAND_SIZE];
31090 +
31091 + pax_track_stack();
31092 +
31093 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31094
31095 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31096 diff -urNp linux-2.6.39.4/drivers/scsi/gdth_proc.c linux-2.6.39.4/drivers/scsi/gdth_proc.c
31097 --- linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-05-19 00:06:34.000000000 -0400
31098 +++ linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-08-05 19:44:37.000000000 -0400
31099 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31100 u64 paddr;
31101
31102 char cmnd[MAX_COMMAND_SIZE];
31103 +
31104 + pax_track_stack();
31105 +
31106 memset(cmnd, 0xff, 12);
31107 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31108
31109 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31110 gdth_hget_str *phg;
31111 char cmnd[MAX_COMMAND_SIZE];
31112
31113 + pax_track_stack();
31114 +
31115 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31116 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31117 if (!gdtcmd || !estr)
31118 diff -urNp linux-2.6.39.4/drivers/scsi/hosts.c linux-2.6.39.4/drivers/scsi/hosts.c
31119 --- linux-2.6.39.4/drivers/scsi/hosts.c 2011-05-19 00:06:34.000000000 -0400
31120 +++ linux-2.6.39.4/drivers/scsi/hosts.c 2011-08-05 19:44:37.000000000 -0400
31121 @@ -42,7 +42,7 @@
31122 #include "scsi_logging.h"
31123
31124
31125 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
31126 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31127
31128
31129 static void scsi_host_cls_release(struct device *dev)
31130 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31131 * subtract one because we increment first then return, but we need to
31132 * know what the next host number was before increment
31133 */
31134 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31135 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31136 shost->dma_channel = 0xff;
31137
31138 /* These three are default values which can be overridden */
31139 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.c linux-2.6.39.4/drivers/scsi/hpsa.c
31140 --- linux-2.6.39.4/drivers/scsi/hpsa.c 2011-05-19 00:06:34.000000000 -0400
31141 +++ linux-2.6.39.4/drivers/scsi/hpsa.c 2011-08-05 20:34:06.000000000 -0400
31142 @@ -469,7 +469,7 @@ static inline u32 next_command(struct ct
31143 u32 a;
31144
31145 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31146 - return h->access.command_completed(h);
31147 + return h->access->command_completed(h);
31148
31149 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31150 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31151 @@ -2889,7 +2889,7 @@ static void start_io(struct ctlr_info *h
31152 while (!list_empty(&h->reqQ)) {
31153 c = list_entry(h->reqQ.next, struct CommandList, list);
31154 /* can't do anything if fifo is full */
31155 - if ((h->access.fifo_full(h))) {
31156 + if ((h->access->fifo_full(h))) {
31157 dev_warn(&h->pdev->dev, "fifo full\n");
31158 break;
31159 }
31160 @@ -2899,7 +2899,7 @@ static void start_io(struct ctlr_info *h
31161 h->Qdepth--;
31162
31163 /* Tell the controller execute command */
31164 - h->access.submit_command(h, c);
31165 + h->access->submit_command(h, c);
31166
31167 /* Put job onto the completed Q */
31168 addQ(&h->cmpQ, c);
31169 @@ -2908,17 +2908,17 @@ static void start_io(struct ctlr_info *h
31170
31171 static inline unsigned long get_next_completion(struct ctlr_info *h)
31172 {
31173 - return h->access.command_completed(h);
31174 + return h->access->command_completed(h);
31175 }
31176
31177 static inline bool interrupt_pending(struct ctlr_info *h)
31178 {
31179 - return h->access.intr_pending(h);
31180 + return h->access->intr_pending(h);
31181 }
31182
31183 static inline long interrupt_not_for_us(struct ctlr_info *h)
31184 {
31185 - return (h->access.intr_pending(h) == 0) ||
31186 + return (h->access->intr_pending(h) == 0) ||
31187 (h->interrupts_enabled == 0);
31188 }
31189
31190 @@ -3684,7 +3684,7 @@ static int __devinit hpsa_pci_init(struc
31191 if (prod_index < 0)
31192 return -ENODEV;
31193 h->product_name = products[prod_index].product_name;
31194 - h->access = *(products[prod_index].access);
31195 + h->access = products[prod_index].access;
31196
31197 if (hpsa_board_disabled(h->pdev)) {
31198 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31199 @@ -3845,7 +3845,7 @@ static int __devinit hpsa_init_one(struc
31200 }
31201
31202 /* make sure the board interrupts are off */
31203 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31204 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31205
31206 if (h->msix_vector || h->msi_vector)
31207 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
31208 @@ -3892,7 +3892,7 @@ static int __devinit hpsa_init_one(struc
31209 hpsa_scsi_setup(h);
31210
31211 /* Turn the interrupts on so we can service requests */
31212 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31213 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31214
31215 hpsa_put_ctlr_into_performant_mode(h);
31216 hpsa_hba_inquiry(h);
31217 @@ -3955,7 +3955,7 @@ static void hpsa_shutdown(struct pci_dev
31218 * To write all data in the battery backed cache to disks
31219 */
31220 hpsa_flush_cache(h);
31221 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31222 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31223 free_irq(h->intr[h->intr_mode], h);
31224 #ifdef CONFIG_PCI_MSI
31225 if (h->msix_vector)
31226 @@ -4118,7 +4118,7 @@ static __devinit void hpsa_enter_perform
31227 return;
31228 }
31229 /* Change the access methods to the performant access methods */
31230 - h->access = SA5_performant_access;
31231 + h->access = &SA5_performant_access;
31232 h->transMethod = CFGTBL_Trans_Performant;
31233 }
31234
31235 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.h linux-2.6.39.4/drivers/scsi/hpsa.h
31236 --- linux-2.6.39.4/drivers/scsi/hpsa.h 2011-05-19 00:06:34.000000000 -0400
31237 +++ linux-2.6.39.4/drivers/scsi/hpsa.h 2011-08-05 20:34:06.000000000 -0400
31238 @@ -73,7 +73,7 @@ struct ctlr_info {
31239 unsigned int msix_vector;
31240 unsigned int msi_vector;
31241 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31242 - struct access_method access;
31243 + struct access_method *access;
31244
31245 /* queue and queue Info */
31246 struct list_head reqQ;
31247 diff -urNp linux-2.6.39.4/drivers/scsi/ips.h linux-2.6.39.4/drivers/scsi/ips.h
31248 --- linux-2.6.39.4/drivers/scsi/ips.h 2011-05-19 00:06:34.000000000 -0400
31249 +++ linux-2.6.39.4/drivers/scsi/ips.h 2011-08-05 20:34:06.000000000 -0400
31250 @@ -1027,7 +1027,7 @@ typedef struct {
31251 int (*intr)(struct ips_ha *);
31252 void (*enableint)(struct ips_ha *);
31253 uint32_t (*statupd)(struct ips_ha *);
31254 -} ips_hw_func_t;
31255 +} __no_const ips_hw_func_t;
31256
31257 typedef struct ips_ha {
31258 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31259 diff -urNp linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c
31260 --- linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-05-19 00:06:34.000000000 -0400
31261 +++ linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-08-05 19:44:37.000000000 -0400
31262 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31263 * all together if not used XXX
31264 */
31265 struct {
31266 - atomic_t no_free_exch;
31267 - atomic_t no_free_exch_xid;
31268 - atomic_t xid_not_found;
31269 - atomic_t xid_busy;
31270 - atomic_t seq_not_found;
31271 - atomic_t non_bls_resp;
31272 + atomic_unchecked_t no_free_exch;
31273 + atomic_unchecked_t no_free_exch_xid;
31274 + atomic_unchecked_t xid_not_found;
31275 + atomic_unchecked_t xid_busy;
31276 + atomic_unchecked_t seq_not_found;
31277 + atomic_unchecked_t non_bls_resp;
31278 } stats;
31279 };
31280
31281 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31282 /* allocate memory for exchange */
31283 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31284 if (!ep) {
31285 - atomic_inc(&mp->stats.no_free_exch);
31286 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31287 goto out;
31288 }
31289 memset(ep, 0, sizeof(*ep));
31290 @@ -761,7 +761,7 @@ out:
31291 return ep;
31292 err:
31293 spin_unlock_bh(&pool->lock);
31294 - atomic_inc(&mp->stats.no_free_exch_xid);
31295 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31296 mempool_free(ep, mp->ep_pool);
31297 return NULL;
31298 }
31299 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31300 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31301 ep = fc_exch_find(mp, xid);
31302 if (!ep) {
31303 - atomic_inc(&mp->stats.xid_not_found);
31304 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31305 reject = FC_RJT_OX_ID;
31306 goto out;
31307 }
31308 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31309 ep = fc_exch_find(mp, xid);
31310 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31311 if (ep) {
31312 - atomic_inc(&mp->stats.xid_busy);
31313 + atomic_inc_unchecked(&mp->stats.xid_busy);
31314 reject = FC_RJT_RX_ID;
31315 goto rel;
31316 }
31317 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31318 }
31319 xid = ep->xid; /* get our XID */
31320 } else if (!ep) {
31321 - atomic_inc(&mp->stats.xid_not_found);
31322 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31323 reject = FC_RJT_RX_ID; /* XID not found */
31324 goto out;
31325 }
31326 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31327 } else {
31328 sp = &ep->seq;
31329 if (sp->id != fh->fh_seq_id) {
31330 - atomic_inc(&mp->stats.seq_not_found);
31331 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31332 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31333 goto rel;
31334 }
31335 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31336
31337 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31338 if (!ep) {
31339 - atomic_inc(&mp->stats.xid_not_found);
31340 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31341 goto out;
31342 }
31343 if (ep->esb_stat & ESB_ST_COMPLETE) {
31344 - atomic_inc(&mp->stats.xid_not_found);
31345 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31346 goto rel;
31347 }
31348 if (ep->rxid == FC_XID_UNKNOWN)
31349 ep->rxid = ntohs(fh->fh_rx_id);
31350 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31351 - atomic_inc(&mp->stats.xid_not_found);
31352 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31353 goto rel;
31354 }
31355 if (ep->did != ntoh24(fh->fh_s_id) &&
31356 ep->did != FC_FID_FLOGI) {
31357 - atomic_inc(&mp->stats.xid_not_found);
31358 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31359 goto rel;
31360 }
31361 sof = fr_sof(fp);
31362 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31363 sp->ssb_stat |= SSB_ST_RESP;
31364 sp->id = fh->fh_seq_id;
31365 } else if (sp->id != fh->fh_seq_id) {
31366 - atomic_inc(&mp->stats.seq_not_found);
31367 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31368 goto rel;
31369 }
31370
31371 @@ -1479,9 +1479,9 @@ static void fc_exch_recv_resp(struct fc_
31372 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31373
31374 if (!sp)
31375 - atomic_inc(&mp->stats.xid_not_found);
31376 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31377 else
31378 - atomic_inc(&mp->stats.non_bls_resp);
31379 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31380
31381 fc_frame_free(fp);
31382 }
31383 diff -urNp linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c
31384 --- linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-05-19 00:06:34.000000000 -0400
31385 +++ linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-08-05 20:34:06.000000000 -0400
31386 @@ -314,7 +314,7 @@ static struct ata_port_operations sas_sa
31387 .postreset = ata_std_postreset,
31388 .error_handler = ata_std_error_handler,
31389 .post_internal_cmd = sas_ata_post_internal,
31390 - .qc_defer = ata_std_qc_defer,
31391 + .qc_defer = ata_std_qc_defer,
31392 .qc_prep = ata_noop_qc_prep,
31393 .qc_issue = sas_ata_qc_issue,
31394 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31395 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c
31396 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-19 00:06:34.000000000 -0400
31397 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-05 19:44:37.000000000 -0400
31398 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31399
31400 #include <linux/debugfs.h>
31401
31402 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31403 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31404 static unsigned long lpfc_debugfs_start_time = 0L;
31405
31406 /* iDiag */
31407 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31408 lpfc_debugfs_enable = 0;
31409
31410 len = 0;
31411 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31412 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31413 (lpfc_debugfs_max_disc_trc - 1);
31414 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31415 dtp = vport->disc_trc + i;
31416 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31417 lpfc_debugfs_enable = 0;
31418
31419 len = 0;
31420 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31421 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31422 (lpfc_debugfs_max_slow_ring_trc - 1);
31423 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31424 dtp = phba->slow_ring_trc + i;
31425 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31426 uint32_t *ptr;
31427 char buffer[1024];
31428
31429 + pax_track_stack();
31430 +
31431 off = 0;
31432 spin_lock_irq(&phba->hbalock);
31433
31434 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31435 !vport || !vport->disc_trc)
31436 return;
31437
31438 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31439 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31440 (lpfc_debugfs_max_disc_trc - 1);
31441 dtp = vport->disc_trc + index;
31442 dtp->fmt = fmt;
31443 dtp->data1 = data1;
31444 dtp->data2 = data2;
31445 dtp->data3 = data3;
31446 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31447 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31448 dtp->jif = jiffies;
31449 #endif
31450 return;
31451 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31452 !phba || !phba->slow_ring_trc)
31453 return;
31454
31455 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31456 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31457 (lpfc_debugfs_max_slow_ring_trc - 1);
31458 dtp = phba->slow_ring_trc + index;
31459 dtp->fmt = fmt;
31460 dtp->data1 = data1;
31461 dtp->data2 = data2;
31462 dtp->data3 = data3;
31463 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31464 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31465 dtp->jif = jiffies;
31466 #endif
31467 return;
31468 @@ -2145,7 +2147,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31469 "slow_ring buffer\n");
31470 goto debug_failed;
31471 }
31472 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31473 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31474 memset(phba->slow_ring_trc, 0,
31475 (sizeof(struct lpfc_debugfs_trc) *
31476 lpfc_debugfs_max_slow_ring_trc));
31477 @@ -2191,7 +2193,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31478 "buffer\n");
31479 goto debug_failed;
31480 }
31481 - atomic_set(&vport->disc_trc_cnt, 0);
31482 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31483
31484 snprintf(name, sizeof(name), "discovery_trace");
31485 vport->debug_disc_trc =
31486 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h
31487 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-05-19 00:06:34.000000000 -0400
31488 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-08-05 19:44:37.000000000 -0400
31489 @@ -419,7 +419,7 @@ struct lpfc_vport {
31490 struct dentry *debug_nodelist;
31491 struct dentry *vport_debugfs_root;
31492 struct lpfc_debugfs_trc *disc_trc;
31493 - atomic_t disc_trc_cnt;
31494 + atomic_unchecked_t disc_trc_cnt;
31495 #endif
31496 uint8_t stat_data_enabled;
31497 uint8_t stat_data_blocked;
31498 @@ -785,8 +785,8 @@ struct lpfc_hba {
31499 struct timer_list fabric_block_timer;
31500 unsigned long bit_flags;
31501 #define FABRIC_COMANDS_BLOCKED 0
31502 - atomic_t num_rsrc_err;
31503 - atomic_t num_cmd_success;
31504 + atomic_unchecked_t num_rsrc_err;
31505 + atomic_unchecked_t num_cmd_success;
31506 unsigned long last_rsrc_error_time;
31507 unsigned long last_ramp_down_time;
31508 unsigned long last_ramp_up_time;
31509 @@ -800,7 +800,7 @@ struct lpfc_hba {
31510 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31511 struct dentry *debug_slow_ring_trc;
31512 struct lpfc_debugfs_trc *slow_ring_trc;
31513 - atomic_t slow_ring_trc_cnt;
31514 + atomic_unchecked_t slow_ring_trc_cnt;
31515 /* iDiag debugfs sub-directory */
31516 struct dentry *idiag_root;
31517 struct dentry *idiag_pci_cfg;
31518 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c
31519 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-05-19 00:06:34.000000000 -0400
31520 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:34:06.000000000 -0400
31521 @@ -9535,8 +9535,10 @@ lpfc_init(void)
31522 printk(LPFC_COPYRIGHT "\n");
31523
31524 if (lpfc_enable_npiv) {
31525 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31526 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31527 + pax_open_kernel();
31528 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31529 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31530 + pax_close_kernel();
31531 }
31532 lpfc_transport_template =
31533 fc_attach_transport(&lpfc_transport_functions);
31534 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c
31535 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-19 00:06:34.000000000 -0400
31536 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-05 19:44:37.000000000 -0400
31537 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31538 uint32_t evt_posted;
31539
31540 spin_lock_irqsave(&phba->hbalock, flags);
31541 - atomic_inc(&phba->num_rsrc_err);
31542 + atomic_inc_unchecked(&phba->num_rsrc_err);
31543 phba->last_rsrc_error_time = jiffies;
31544
31545 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31546 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31547 unsigned long flags;
31548 struct lpfc_hba *phba = vport->phba;
31549 uint32_t evt_posted;
31550 - atomic_inc(&phba->num_cmd_success);
31551 + atomic_inc_unchecked(&phba->num_cmd_success);
31552
31553 if (vport->cfg_lun_queue_depth <= queue_depth)
31554 return;
31555 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31556 unsigned long num_rsrc_err, num_cmd_success;
31557 int i;
31558
31559 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31560 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31561 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31562 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31563
31564 vports = lpfc_create_vport_work_array(phba);
31565 if (vports != NULL)
31566 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31567 }
31568 }
31569 lpfc_destroy_vport_work_array(phba, vports);
31570 - atomic_set(&phba->num_rsrc_err, 0);
31571 - atomic_set(&phba->num_cmd_success, 0);
31572 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31573 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31574 }
31575
31576 /**
31577 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31578 }
31579 }
31580 lpfc_destroy_vport_work_array(phba, vports);
31581 - atomic_set(&phba->num_rsrc_err, 0);
31582 - atomic_set(&phba->num_cmd_success, 0);
31583 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31584 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31585 }
31586
31587 /**
31588 diff -urNp linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c
31589 --- linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-19 00:06:34.000000000 -0400
31590 +++ linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-05 19:44:37.000000000 -0400
31591 @@ -3510,6 +3510,8 @@ megaraid_cmm_register(adapter_t *adapter
31592 int rval;
31593 int i;
31594
31595 + pax_track_stack();
31596 +
31597 // Allocate memory for the base list of scb for management module.
31598 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31599
31600 diff -urNp linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c
31601 --- linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-05-19 00:06:34.000000000 -0400
31602 +++ linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-08-05 19:44:37.000000000 -0400
31603 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31604 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31605 int ret;
31606
31607 + pax_track_stack();
31608 +
31609 or = osd_start_request(od, GFP_KERNEL);
31610 if (!or)
31611 return -ENOMEM;
31612 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.c linux-2.6.39.4/drivers/scsi/pmcraid.c
31613 --- linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-05-19 00:06:34.000000000 -0400
31614 +++ linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-08-05 19:44:37.000000000 -0400
31615 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31616 res->scsi_dev = scsi_dev;
31617 scsi_dev->hostdata = res;
31618 res->change_detected = 0;
31619 - atomic_set(&res->read_failures, 0);
31620 - atomic_set(&res->write_failures, 0);
31621 + atomic_set_unchecked(&res->read_failures, 0);
31622 + atomic_set_unchecked(&res->write_failures, 0);
31623 rc = 0;
31624 }
31625 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31626 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31627
31628 /* If this was a SCSI read/write command keep count of errors */
31629 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31630 - atomic_inc(&res->read_failures);
31631 + atomic_inc_unchecked(&res->read_failures);
31632 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31633 - atomic_inc(&res->write_failures);
31634 + atomic_inc_unchecked(&res->write_failures);
31635
31636 if (!RES_IS_GSCSI(res->cfg_entry) &&
31637 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31638 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31639 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31640 * hrrq_id assigned here in queuecommand
31641 */
31642 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31643 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31644 pinstance->num_hrrq;
31645 cmd->cmd_done = pmcraid_io_done;
31646
31647 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31648 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31649 * hrrq_id assigned here in queuecommand
31650 */
31651 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31652 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31653 pinstance->num_hrrq;
31654
31655 if (request_size) {
31656 @@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
31657
31658 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31659 /* add resources only after host is added into system */
31660 - if (!atomic_read(&pinstance->expose_resources))
31661 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31662 return;
31663
31664 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31665 @@ -5329,8 +5329,8 @@ static int __devinit pmcraid_init_instan
31666 init_waitqueue_head(&pinstance->reset_wait_q);
31667
31668 atomic_set(&pinstance->outstanding_cmds, 0);
31669 - atomic_set(&pinstance->last_message_id, 0);
31670 - atomic_set(&pinstance->expose_resources, 0);
31671 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31672 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31673
31674 INIT_LIST_HEAD(&pinstance->free_res_q);
31675 INIT_LIST_HEAD(&pinstance->used_res_q);
31676 @@ -6045,7 +6045,7 @@ static int __devinit pmcraid_probe(
31677 /* Schedule worker thread to handle CCN and take care of adding and
31678 * removing devices to OS
31679 */
31680 - atomic_set(&pinstance->expose_resources, 1);
31681 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31682 schedule_work(&pinstance->worker_q);
31683 return rc;
31684
31685 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.h linux-2.6.39.4/drivers/scsi/pmcraid.h
31686 --- linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-05-19 00:06:34.000000000 -0400
31687 +++ linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-08-05 19:44:37.000000000 -0400
31688 @@ -750,7 +750,7 @@ struct pmcraid_instance {
31689 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31690
31691 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31692 - atomic_t last_message_id;
31693 + atomic_unchecked_t last_message_id;
31694
31695 /* configuration table */
31696 struct pmcraid_config_table *cfg_table;
31697 @@ -779,7 +779,7 @@ struct pmcraid_instance {
31698 atomic_t outstanding_cmds;
31699
31700 /* should add/delete resources to mid-layer now ?*/
31701 - atomic_t expose_resources;
31702 + atomic_unchecked_t expose_resources;
31703
31704
31705
31706 @@ -815,8 +815,8 @@ struct pmcraid_resource_entry {
31707 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31708 };
31709 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31710 - atomic_t read_failures; /* count of failed READ commands */
31711 - atomic_t write_failures; /* count of failed WRITE commands */
31712 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31713 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31714
31715 /* To indicate add/delete/modify during CCN */
31716 u8 change_detected;
31717 diff -urNp linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h
31718 --- linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-05-19 00:06:34.000000000 -0400
31719 +++ linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:34:06.000000000 -0400
31720 @@ -2236,7 +2236,7 @@ struct isp_operations {
31721 int (*get_flash_version) (struct scsi_qla_host *, void *);
31722 int (*start_scsi) (srb_t *);
31723 int (*abort_isp) (struct scsi_qla_host *);
31724 -};
31725 +} __no_const;
31726
31727 /* MSI-X Support *************************************************************/
31728
31729 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h
31730 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-05-19 00:06:34.000000000 -0400
31731 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-05 19:44:37.000000000 -0400
31732 @@ -256,7 +256,7 @@ struct ddb_entry {
31733 atomic_t retry_relogin_timer; /* Min Time between relogins
31734 * (4000 only) */
31735 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31736 - atomic_t relogin_retry_count; /* Num of times relogin has been
31737 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31738 * retried */
31739
31740 uint16_t port;
31741 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c
31742 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-05-19 00:06:34.000000000 -0400
31743 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-05 19:44:37.000000000 -0400
31744 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31745 ddb_entry->fw_ddb_index = fw_ddb_index;
31746 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31747 atomic_set(&ddb_entry->relogin_timer, 0);
31748 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31749 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31750 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31751 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31752 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31753 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31754 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31755 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31756 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31757 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31758 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31759 atomic_set(&ddb_entry->relogin_timer, 0);
31760 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31761 iscsi_unblock_session(ddb_entry->sess);
31762 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c
31763 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-05-19 00:06:34.000000000 -0400
31764 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-05 19:44:37.000000000 -0400
31765 @@ -802,13 +802,13 @@ static void qla4xxx_timer(struct scsi_ql
31766 ddb_entry->fw_ddb_device_state ==
31767 DDB_DS_SESSION_FAILED) {
31768 /* Reset retry relogin timer */
31769 - atomic_inc(&ddb_entry->relogin_retry_count);
31770 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31771 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31772 " timed out-retrying"
31773 " relogin (%d)\n",
31774 ha->host_no,
31775 ddb_entry->fw_ddb_index,
31776 - atomic_read(&ddb_entry->
31777 + atomic_read_unchecked(&ddb_entry->
31778 relogin_retry_count))
31779 );
31780 start_dpc++;
31781 diff -urNp linux-2.6.39.4/drivers/scsi/scsi.c linux-2.6.39.4/drivers/scsi/scsi.c
31782 --- linux-2.6.39.4/drivers/scsi/scsi.c 2011-05-19 00:06:34.000000000 -0400
31783 +++ linux-2.6.39.4/drivers/scsi/scsi.c 2011-08-05 19:44:37.000000000 -0400
31784 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31785 unsigned long timeout;
31786 int rtn = 0;
31787
31788 - atomic_inc(&cmd->device->iorequest_cnt);
31789 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31790
31791 /* check if the device is still usable */
31792 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31793 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_debug.c linux-2.6.39.4/drivers/scsi/scsi_debug.c
31794 --- linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-05-19 00:06:34.000000000 -0400
31795 +++ linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-08-05 19:44:37.000000000 -0400
31796 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31797 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31798 unsigned char *cmd = (unsigned char *)scp->cmnd;
31799
31800 + pax_track_stack();
31801 +
31802 if ((errsts = check_readiness(scp, 1, devip)))
31803 return errsts;
31804 memset(arr, 0, sizeof(arr));
31805 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31806 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31807 unsigned char *cmd = (unsigned char *)scp->cmnd;
31808
31809 + pax_track_stack();
31810 +
31811 if ((errsts = check_readiness(scp, 1, devip)))
31812 return errsts;
31813 memset(arr, 0, sizeof(arr));
31814 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_lib.c linux-2.6.39.4/drivers/scsi/scsi_lib.c
31815 --- linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-05-19 00:06:34.000000000 -0400
31816 +++ linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-08-05 19:44:37.000000000 -0400
31817 @@ -1410,7 +1410,7 @@ static void scsi_kill_request(struct req
31818 shost = sdev->host;
31819 scsi_init_cmd_errh(cmd);
31820 cmd->result = DID_NO_CONNECT << 16;
31821 - atomic_inc(&cmd->device->iorequest_cnt);
31822 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31823
31824 /*
31825 * SCSI request completion path will do scsi_device_unbusy(),
31826 @@ -1436,9 +1436,9 @@ static void scsi_softirq_done(struct req
31827
31828 INIT_LIST_HEAD(&cmd->eh_entry);
31829
31830 - atomic_inc(&cmd->device->iodone_cnt);
31831 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31832 if (cmd->result)
31833 - atomic_inc(&cmd->device->ioerr_cnt);
31834 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31835
31836 disposition = scsi_decide_disposition(cmd);
31837 if (disposition != SUCCESS &&
31838 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_sysfs.c linux-2.6.39.4/drivers/scsi/scsi_sysfs.c
31839 --- linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:22.000000000 -0400
31840 +++ linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-08-05 19:44:37.000000000 -0400
31841 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31842 char *buf) \
31843 { \
31844 struct scsi_device *sdev = to_scsi_device(dev); \
31845 - unsigned long long count = atomic_read(&sdev->field); \
31846 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31847 return snprintf(buf, 20, "0x%llx\n", count); \
31848 } \
31849 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31850 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c
31851 --- linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-05-19 00:06:34.000000000 -0400
31852 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-08-05 19:44:37.000000000 -0400
31853 @@ -485,7 +485,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31854 * Netlink Infrastructure
31855 */
31856
31857 -static atomic_t fc_event_seq;
31858 +static atomic_unchecked_t fc_event_seq;
31859
31860 /**
31861 * fc_get_event_number - Obtain the next sequential FC event number
31862 @@ -498,7 +498,7 @@ static atomic_t fc_event_seq;
31863 u32
31864 fc_get_event_number(void)
31865 {
31866 - return atomic_add_return(1, &fc_event_seq);
31867 + return atomic_add_return_unchecked(1, &fc_event_seq);
31868 }
31869 EXPORT_SYMBOL(fc_get_event_number);
31870
31871 @@ -646,7 +646,7 @@ static __init int fc_transport_init(void
31872 {
31873 int error;
31874
31875 - atomic_set(&fc_event_seq, 0);
31876 + atomic_set_unchecked(&fc_event_seq, 0);
31877
31878 error = transport_class_register(&fc_host_class);
31879 if (error)
31880 @@ -836,7 +836,7 @@ static int fc_str_to_dev_loss(const char
31881 char *cp;
31882
31883 *val = simple_strtoul(buf, &cp, 0);
31884 - if ((*cp && (*cp != '\n')) || (*val < 0))
31885 + if (*cp && (*cp != '\n'))
31886 return -EINVAL;
31887 /*
31888 * Check for overflow; dev_loss_tmo is u32
31889 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c
31890 --- linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-05-19 00:06:34.000000000 -0400
31891 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-05 19:44:37.000000000 -0400
31892 @@ -83,7 +83,7 @@ struct iscsi_internal {
31893 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31894 };
31895
31896 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31897 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31898 static struct workqueue_struct *iscsi_eh_timer_workq;
31899
31900 /*
31901 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31902 int err;
31903
31904 ihost = shost->shost_data;
31905 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31906 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31907
31908 if (id == ISCSI_MAX_TARGET) {
31909 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31910 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31911 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31912 ISCSI_TRANSPORT_VERSION);
31913
31914 - atomic_set(&iscsi_session_nr, 0);
31915 + atomic_set_unchecked(&iscsi_session_nr, 0);
31916
31917 err = class_register(&iscsi_transport_class);
31918 if (err)
31919 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c
31920 --- linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-05-19 00:06:34.000000000 -0400
31921 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-08-05 19:44:37.000000000 -0400
31922 @@ -33,7 +33,7 @@
31923 #include "scsi_transport_srp_internal.h"
31924
31925 struct srp_host_attrs {
31926 - atomic_t next_port_id;
31927 + atomic_unchecked_t next_port_id;
31928 };
31929 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31930
31931 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31932 struct Scsi_Host *shost = dev_to_shost(dev);
31933 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31934
31935 - atomic_set(&srp_host->next_port_id, 0);
31936 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31937 return 0;
31938 }
31939
31940 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31941 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31942 rport->roles = ids->roles;
31943
31944 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31945 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31946 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31947
31948 transport_setup_device(&rport->dev);
31949 diff -urNp linux-2.6.39.4/drivers/scsi/sg.c linux-2.6.39.4/drivers/scsi/sg.c
31950 --- linux-2.6.39.4/drivers/scsi/sg.c 2011-05-19 00:06:34.000000000 -0400
31951 +++ linux-2.6.39.4/drivers/scsi/sg.c 2011-08-05 19:44:37.000000000 -0400
31952 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31953 const struct file_operations * fops;
31954 };
31955
31956 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31957 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31958 {"allow_dio", &adio_fops},
31959 {"debug", &debug_fops},
31960 {"def_reserved_size", &dressz_fops},
31961 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31962 {
31963 int k, mask;
31964 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31965 - struct sg_proc_leaf * leaf;
31966 + const struct sg_proc_leaf * leaf;
31967
31968 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31969 if (!sg_proc_sgp)
31970 diff -urNp linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31971 --- linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-19 00:06:34.000000000 -0400
31972 +++ linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-05 19:44:37.000000000 -0400
31973 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31974 int do_iounmap = 0;
31975 int do_disable_device = 1;
31976
31977 + pax_track_stack();
31978 +
31979 memset(&sym_dev, 0, sizeof(sym_dev));
31980 memset(&nvram, 0, sizeof(nvram));
31981 sym_dev.pdev = pdev;
31982 diff -urNp linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c
31983 --- linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-05-19 00:06:34.000000000 -0400
31984 +++ linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-08-05 19:44:37.000000000 -0400
31985 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31986 dma_addr_t base;
31987 unsigned i;
31988
31989 + pax_track_stack();
31990 +
31991 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31992 cmd.reqRingNumPages = adapter->req_pages;
31993 cmd.cmpRingNumPages = adapter->cmp_pages;
31994 diff -urNp linux-2.6.39.4/drivers/spi/spi.c linux-2.6.39.4/drivers/spi/spi.c
31995 --- linux-2.6.39.4/drivers/spi/spi.c 2011-05-19 00:06:34.000000000 -0400
31996 +++ linux-2.6.39.4/drivers/spi/spi.c 2011-08-05 19:44:37.000000000 -0400
31997 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31998 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31999
32000 /* portable code must never pass more than 32 bytes */
32001 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
32002 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
32003
32004 static u8 *buf;
32005
32006 diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
32007 --- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-05-19 00:06:34.000000000 -0400
32008 +++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-14 12:12:59.000000000 -0400
32009 @@ -384,7 +384,7 @@ static struct ar_cookie s_ar_cookie_mem[
32010 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
32011
32012
32013 -static struct net_device_ops ar6000_netdev_ops = {
32014 +static net_device_ops_no_const ar6000_netdev_ops = {
32015 .ndo_init = NULL,
32016 .ndo_open = ar6000_open,
32017 .ndo_stop = ar6000_close,
32018 diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
32019 --- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-05-19 00:06:34.000000000 -0400
32020 +++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-14 09:32:05.000000000 -0400
32021 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
32022 typedef struct ar6k_pal_config_s
32023 {
32024 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
32025 -}ar6k_pal_config_t;
32026 +} __no_const ar6k_pal_config_t;
32027
32028 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
32029 #endif /* _AR6K_PAL_H_ */
32030 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
32031 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-19 00:06:34.000000000 -0400
32032 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-05 20:34:06.000000000 -0400
32033 @@ -857,14 +857,14 @@ static void dhd_op_if(dhd_if_t *ifp)
32034 free_netdev(ifp->net);
32035 }
32036 /* Allocate etherdev, including space for private structure */
32037 - ifp->net = alloc_etherdev(sizeof(dhd));
32038 + ifp->net = alloc_etherdev(sizeof(*dhd));
32039 if (!ifp->net) {
32040 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32041 ret = -ENOMEM;
32042 }
32043 if (ret == 0) {
32044 strcpy(ifp->net->name, ifp->name);
32045 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
32046 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
32047 err = dhd_net_attach(&dhd->pub, ifp->idx);
32048 if (err != 0) {
32049 DHD_ERROR(("%s: dhd_net_attach failed, "
32050 @@ -1923,7 +1923,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32051 strcpy(nv_path, nvram_path);
32052
32053 /* Allocate etherdev, including space for private structure */
32054 - net = alloc_etherdev(sizeof(dhd));
32055 + net = alloc_etherdev(sizeof(*dhd));
32056 if (!net) {
32057 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32058 goto fail;
32059 @@ -1939,7 +1939,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32060 /*
32061 * Save the dhd_info into the priv
32062 */
32063 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32064 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32065
32066 /* Set network interface name if it was provided as module parameter */
32067 if (iface_name[0]) {
32068 @@ -2056,7 +2056,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32069 /*
32070 * Save the dhd_info into the priv
32071 */
32072 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32073 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32074
32075 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32076 g_bus = bus;
32077 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c
32078 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-19 00:06:34.000000000 -0400
32079 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-08-05 19:44:37.000000000 -0400
32080 @@ -495,7 +495,7 @@ wl_iw_get_range(struct net_device *dev,
32081 list = (wl_u32_list_t *) channels;
32082
32083 dwrq->length = sizeof(struct iw_range);
32084 - memset(range, 0, sizeof(range));
32085 + memset(range, 0, sizeof(*range));
32086
32087 range->min_nwid = range->max_nwid = 0;
32088
32089 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c
32090 --- linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-05-19 00:06:34.000000000 -0400
32091 +++ linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-08-05 19:44:37.000000000 -0400
32092 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32093 struct net_device_stats *stats = &etdev->net_stats;
32094
32095 if (tcb->flags & fMP_DEST_BROAD)
32096 - atomic_inc(&etdev->Stats.brdcstxmt);
32097 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32098 else if (tcb->flags & fMP_DEST_MULTI)
32099 - atomic_inc(&etdev->Stats.multixmt);
32100 + atomic_inc_unchecked(&etdev->Stats.multixmt);
32101 else
32102 - atomic_inc(&etdev->Stats.unixmt);
32103 + atomic_inc_unchecked(&etdev->Stats.unixmt);
32104
32105 if (tcb->skb) {
32106 stats->tx_bytes += tcb->skb->len;
32107 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h
32108 --- linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-05-19 00:06:34.000000000 -0400
32109 +++ linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-08-05 19:44:37.000000000 -0400
32110 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32111 * operations
32112 */
32113 u32 unircv; /* # multicast packets received */
32114 - atomic_t unixmt; /* # multicast packets for Tx */
32115 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32116 u32 multircv; /* # multicast packets received */
32117 - atomic_t multixmt; /* # multicast packets for Tx */
32118 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32119 u32 brdcstrcv; /* # broadcast packets received */
32120 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
32121 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32122 u32 norcvbuf; /* # Rx packets discarded */
32123 u32 noxmtbuf; /* # Tx packets discarded */
32124
32125 diff -urNp linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c
32126 --- linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-05-19 00:06:34.000000000 -0400
32127 +++ linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-08-14 12:25:25.000000000 -0400
32128 @@ -230,8 +230,10 @@ int psb_mmap(struct file *filp, struct v
32129 if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
32130 dev_priv->ttm_vm_ops = (struct vm_operations_struct *)
32131 vma->vm_ops;
32132 - psb_ttm_vm_ops = *vma->vm_ops;
32133 - psb_ttm_vm_ops.fault = &psb_ttm_fault;
32134 + pax_open_kernel();
32135 + memcpy((void *)&psb_ttm_vm_ops, vma->vm_ops, sizeof(psb_ttm_vm_ops));
32136 + *(void **)&psb_ttm_vm_ops.fault = &psb_ttm_fault;
32137 + pax_close_kernel();
32138 }
32139
32140 vma->vm_ops = &psb_ttm_vm_ops;
32141 diff -urNp linux-2.6.39.4/drivers/staging/hv/channel.c linux-2.6.39.4/drivers/staging/hv/channel.c
32142 --- linux-2.6.39.4/drivers/staging/hv/channel.c 2011-05-19 00:06:34.000000000 -0400
32143 +++ linux-2.6.39.4/drivers/staging/hv/channel.c 2011-08-05 19:44:37.000000000 -0400
32144 @@ -509,8 +509,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32145 unsigned long flags;
32146 int ret = 0;
32147
32148 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32149 - atomic_inc(&vmbus_connection.next_gpadl_handle);
32150 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32151 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32152
32153 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32154 if (ret)
32155 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv.c linux-2.6.39.4/drivers/staging/hv/hv.c
32156 --- linux-2.6.39.4/drivers/staging/hv/hv.c 2011-05-19 00:06:34.000000000 -0400
32157 +++ linux-2.6.39.4/drivers/staging/hv/hv.c 2011-08-05 19:44:37.000000000 -0400
32158 @@ -163,7 +163,7 @@ static u64 do_hypercall(u64 control, voi
32159 u64 output_address = (output) ? virt_to_phys(output) : 0;
32160 u32 output_address_hi = output_address >> 32;
32161 u32 output_address_lo = output_address & 0xFFFFFFFF;
32162 - volatile void *hypercall_page = hv_context.hypercall_page;
32163 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32164
32165 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
32166 control, input, output);
32167 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv_mouse.c linux-2.6.39.4/drivers/staging/hv/hv_mouse.c
32168 --- linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-05-19 00:06:34.000000000 -0400
32169 +++ linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-08-13 20:26:10.000000000 -0400
32170 @@ -898,8 +898,10 @@ static void reportdesc_callback(struct h
32171 if (hid_dev) {
32172 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32173
32174 - hid_dev->ll_driver->open = mousevsc_hid_open;
32175 - hid_dev->ll_driver->close = mousevsc_hid_close;
32176 + pax_open_kernel();
32177 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32178 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32179 + pax_close_kernel();
32180
32181 hid_dev->bus = BUS_VIRTUAL;
32182 hid_dev->vendor = input_device_ctx->device_info.vendor;
32183 diff -urNp linux-2.6.39.4/drivers/staging/hv/rndis_filter.c linux-2.6.39.4/drivers/staging/hv/rndis_filter.c
32184 --- linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-05-19 00:06:34.000000000 -0400
32185 +++ linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-08-05 19:44:37.000000000 -0400
32186 @@ -49,7 +49,7 @@ struct rndis_device {
32187
32188 enum rndis_device_state state;
32189 u32 link_stat;
32190 - atomic_t new_req_id;
32191 + atomic_unchecked_t new_req_id;
32192
32193 spinlock_t request_lock;
32194 struct list_head req_list;
32195 @@ -144,7 +144,7 @@ static struct rndis_request *get_rndis_r
32196 * template
32197 */
32198 set = &rndis_msg->msg.set_req;
32199 - set->req_id = atomic_inc_return(&dev->new_req_id);
32200 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32201
32202 /* Add to the request list */
32203 spin_lock_irqsave(&dev->request_lock, flags);
32204 @@ -709,7 +709,7 @@ static void rndis_filter_halt_device(str
32205
32206 /* Setup the rndis set */
32207 halt = &request->request_msg.msg.halt_req;
32208 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32209 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32210
32211 /* Ignore return since this msg is optional. */
32212 rndis_filter_send_request(dev, request);
32213 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c
32214 --- linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-05-19 00:06:34.000000000 -0400
32215 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-08-05 19:44:37.000000000 -0400
32216 @@ -661,14 +661,14 @@ int vmbus_child_device_register(struct h
32217 {
32218 int ret = 0;
32219
32220 - static atomic_t device_num = ATOMIC_INIT(0);
32221 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32222
32223 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
32224 child_device_obj);
32225
32226 /* Set the device name. Otherwise, device_register() will fail. */
32227 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32228 - atomic_inc_return(&device_num));
32229 + atomic_inc_return_unchecked(&device_num));
32230
32231 /* The new device belongs to this bus */
32232 child_device_obj->device.bus = &vmbus_drv.bus; /* device->dev.bus; */
32233 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_private.h linux-2.6.39.4/drivers/staging/hv/vmbus_private.h
32234 --- linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-05-19 00:06:34.000000000 -0400
32235 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-08-05 19:44:37.000000000 -0400
32236 @@ -58,7 +58,7 @@ enum vmbus_connect_state {
32237 struct vmbus_connection {
32238 enum vmbus_connect_state conn_state;
32239
32240 - atomic_t next_gpadl_handle;
32241 + atomic_unchecked_t next_gpadl_handle;
32242
32243 /*
32244 * Represents channel interrupts. Each bit position represents a
32245 diff -urNp linux-2.6.39.4/drivers/staging/iio/ring_generic.h linux-2.6.39.4/drivers/staging/iio/ring_generic.h
32246 --- linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-05-19 00:06:34.000000000 -0400
32247 +++ linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-08-13 20:14:25.000000000 -0400
32248 @@ -86,7 +86,7 @@ struct iio_ring_access_funcs {
32249
32250 int (*is_enabled)(struct iio_ring_buffer *ring);
32251 int (*enable)(struct iio_ring_buffer *ring);
32252 -};
32253 +} __no_const;
32254
32255 /**
32256 * struct iio_ring_buffer - general ring buffer structure
32257 @@ -134,7 +134,7 @@ struct iio_ring_buffer {
32258 struct iio_handler access_handler;
32259 struct iio_event_interface ev_int;
32260 struct iio_shared_ev_pointer shared_ev_pointer;
32261 - struct iio_ring_access_funcs access;
32262 + struct iio_ring_access_funcs access;
32263 int (*preenable)(struct iio_dev *);
32264 int (*postenable)(struct iio_dev *);
32265 int (*predisable)(struct iio_dev *);
32266 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet.c linux-2.6.39.4/drivers/staging/octeon/ethernet.c
32267 --- linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-05-19 00:06:34.000000000 -0400
32268 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-08-05 19:44:37.000000000 -0400
32269 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32270 * since the RX tasklet also increments it.
32271 */
32272 #ifdef CONFIG_64BIT
32273 - atomic64_add(rx_status.dropped_packets,
32274 - (atomic64_t *)&priv->stats.rx_dropped);
32275 + atomic64_add_unchecked(rx_status.dropped_packets,
32276 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32277 #else
32278 - atomic_add(rx_status.dropped_packets,
32279 - (atomic_t *)&priv->stats.rx_dropped);
32280 + atomic_add_unchecked(rx_status.dropped_packets,
32281 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32282 #endif
32283 }
32284
32285 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c
32286 --- linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-05-19 00:06:34.000000000 -0400
32287 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-08-05 19:44:37.000000000 -0400
32288 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32289 /* Increment RX stats for virtual ports */
32290 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32291 #ifdef CONFIG_64BIT
32292 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32293 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32294 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32295 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32296 #else
32297 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32298 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32299 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32300 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32301 #endif
32302 }
32303 netif_receive_skb(skb);
32304 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32305 dev->name);
32306 */
32307 #ifdef CONFIG_64BIT
32308 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32309 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32310 #else
32311 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32312 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32313 #endif
32314 dev_kfree_skb_irq(skb);
32315 }
32316 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/inode.c linux-2.6.39.4/drivers/staging/pohmelfs/inode.c
32317 --- linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-05-19 00:06:34.000000000 -0400
32318 +++ linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-08-05 19:44:37.000000000 -0400
32319 @@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su
32320 mutex_init(&psb->mcache_lock);
32321 psb->mcache_root = RB_ROOT;
32322 psb->mcache_timeout = msecs_to_jiffies(5000);
32323 - atomic_long_set(&psb->mcache_gen, 0);
32324 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32325
32326 psb->trans_max_pages = 100;
32327
32328 @@ -1870,7 +1870,7 @@ static int pohmelfs_fill_super(struct su
32329 INIT_LIST_HEAD(&psb->crypto_ready_list);
32330 INIT_LIST_HEAD(&psb->crypto_active_list);
32331
32332 - atomic_set(&psb->trans_gen, 1);
32333 + atomic_set_unchecked(&psb->trans_gen, 1);
32334 atomic_long_set(&psb->total_inodes, 0);
32335
32336 mutex_init(&psb->state_lock);
32337 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c
32338 --- linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-05-19 00:06:34.000000000 -0400
32339 +++ linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-08-05 19:44:37.000000000 -0400
32340 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32341 m->data = data;
32342 m->start = start;
32343 m->size = size;
32344 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32345 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32346
32347 mutex_lock(&psb->mcache_lock);
32348 err = pohmelfs_mcache_insert(psb, m);
32349 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h
32350 --- linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-05-19 00:06:34.000000000 -0400
32351 +++ linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-08-05 19:44:37.000000000 -0400
32352 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32353 struct pohmelfs_sb {
32354 struct rb_root mcache_root;
32355 struct mutex mcache_lock;
32356 - atomic_long_t mcache_gen;
32357 + atomic_long_unchecked_t mcache_gen;
32358 unsigned long mcache_timeout;
32359
32360 unsigned int idx;
32361
32362 unsigned int trans_retries;
32363
32364 - atomic_t trans_gen;
32365 + atomic_unchecked_t trans_gen;
32366
32367 unsigned int crypto_attached_size;
32368 unsigned int crypto_align_size;
32369 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/trans.c linux-2.6.39.4/drivers/staging/pohmelfs/trans.c
32370 --- linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-05-19 00:06:34.000000000 -0400
32371 +++ linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-08-05 19:44:37.000000000 -0400
32372 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32373 int err;
32374 struct netfs_cmd *cmd = t->iovec.iov_base;
32375
32376 - t->gen = atomic_inc_return(&psb->trans_gen);
32377 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32378
32379 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32380 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32381 diff -urNp linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h
32382 --- linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-05-19 00:06:34.000000000 -0400
32383 +++ linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-13 20:31:57.000000000 -0400
32384 @@ -83,7 +83,7 @@ struct _io_ops {
32385 u8 *pmem);
32386 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32387 u8 *pmem);
32388 -};
32389 +} __no_const;
32390
32391 struct io_req {
32392 struct list_head list;
32393 diff -urNp linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c
32394 --- linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-05-19 00:06:34.000000000 -0400
32395 +++ linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-14 12:29:10.000000000 -0400
32396 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32397 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32398
32399 if (rlen)
32400 - if (copy_to_user(data, &resp, rlen))
32401 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32402 return -EFAULT;
32403
32404 return 0;
32405 diff -urNp linux-2.6.39.4/drivers/staging/tty/istallion.c linux-2.6.39.4/drivers/staging/tty/istallion.c
32406 --- linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-05-19 00:06:34.000000000 -0400
32407 +++ linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-08-05 19:44:37.000000000 -0400
32408 @@ -186,7 +186,6 @@ static struct ktermios stli_deftermios
32409 * re-used for each stats call.
32410 */
32411 static comstats_t stli_comstats;
32412 -static combrd_t stli_brdstats;
32413 static struct asystats stli_cdkstats;
32414
32415 /*****************************************************************************/
32416 @@ -4003,6 +4002,7 @@ out:
32417
32418 static int stli_getbrdstats(combrd_t __user *bp)
32419 {
32420 + combrd_t stli_brdstats;
32421 struct stlibrd *brdp;
32422 unsigned int i;
32423
32424 @@ -4226,6 +4226,8 @@ static int stli_getportstruct(struct stl
32425 struct stliport stli_dummyport;
32426 struct stliport *portp;
32427
32428 + pax_track_stack();
32429 +
32430 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32431 return -EFAULT;
32432 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32433 @@ -4248,6 +4250,8 @@ static int stli_getbrdstruct(struct stli
32434 struct stlibrd stli_dummybrd;
32435 struct stlibrd *brdp;
32436
32437 + pax_track_stack();
32438 +
32439 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32440 return -EFAULT;
32441 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32442 diff -urNp linux-2.6.39.4/drivers/staging/tty/stallion.c linux-2.6.39.4/drivers/staging/tty/stallion.c
32443 --- linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-05-19 00:06:34.000000000 -0400
32444 +++ linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-08-05 19:44:37.000000000 -0400
32445 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32446 struct stlport stl_dummyport;
32447 struct stlport *portp;
32448
32449 + pax_track_stack();
32450 +
32451 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32452 return -EFAULT;
32453 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32454 diff -urNp linux-2.6.39.4/drivers/staging/usbip/stub_dev.c linux-2.6.39.4/drivers/staging/usbip/stub_dev.c
32455 --- linux-2.6.39.4/drivers/staging/usbip/stub_dev.c 2011-05-19 00:06:34.000000000 -0400
32456 +++ linux-2.6.39.4/drivers/staging/usbip/stub_dev.c 2011-08-13 20:32:52.000000000 -0400
32457 @@ -357,9 +357,11 @@ static struct stub_device *stub_device_a
32458
32459 init_waitqueue_head(&sdev->tx_waitq);
32460
32461 - sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
32462 - sdev->ud.eh_ops.reset = stub_device_reset;
32463 - sdev->ud.eh_ops.unusable = stub_device_unusable;
32464 + pax_open_kernel();
32465 + *(void **)&sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
32466 + *(void **)&sdev->ud.eh_ops.reset = stub_device_reset;
32467 + *(void **)&sdev->ud.eh_ops.unusable = stub_device_unusable;
32468 + pax_close_kernel();
32469
32470 usbip_start_eh(&sdev->ud);
32471
32472 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci.h linux-2.6.39.4/drivers/staging/usbip/vhci.h
32473 --- linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-05-19 00:06:34.000000000 -0400
32474 +++ linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-08-05 19:44:37.000000000 -0400
32475 @@ -92,7 +92,7 @@ struct vhci_hcd {
32476 unsigned resuming:1;
32477 unsigned long re_timeout;
32478
32479 - atomic_t seqnum;
32480 + atomic_unchecked_t seqnum;
32481
32482 /*
32483 * NOTE:
32484 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c
32485 --- linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-05-19 00:06:34.000000000 -0400
32486 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-08-13 20:33:49.000000000 -0400
32487 @@ -536,7 +536,7 @@ static void vhci_tx_urb(struct urb *urb)
32488 return;
32489 }
32490
32491 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32492 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32493 if (priv->seqnum == 0xffff)
32494 usbip_uinfo("seqnum max\n");
32495
32496 @@ -795,7 +795,7 @@ static int vhci_urb_dequeue(struct usb_h
32497 return -ENOMEM;
32498 }
32499
32500 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32501 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32502 if (unlink->seqnum == 0xffff)
32503 usbip_uinfo("seqnum max\n");
32504
32505 @@ -965,9 +965,11 @@ static void vhci_device_init(struct vhci
32506
32507 init_waitqueue_head(&vdev->waitq_tx);
32508
32509 - vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
32510 - vdev->ud.eh_ops.reset = vhci_device_reset;
32511 - vdev->ud.eh_ops.unusable = vhci_device_unusable;
32512 + pax_open_kernel();
32513 + *(void **)&vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
32514 + *(void **)&vdev->ud.eh_ops.reset = vhci_device_reset;
32515 + *(void **)&vdev->ud.eh_ops.unusable = vhci_device_unusable;
32516 + pax_close_kernel();
32517
32518 usbip_start_eh(&vdev->ud);
32519 }
32520 @@ -992,7 +994,7 @@ static int vhci_start(struct usb_hcd *hc
32521 vdev->rhport = rhport;
32522 }
32523
32524 - atomic_set(&vhci->seqnum, 0);
32525 + atomic_set_unchecked(&vhci->seqnum, 0);
32526 spin_lock_init(&vhci->lock);
32527
32528
32529 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c
32530 --- linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-05-19 00:06:34.000000000 -0400
32531 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-08-05 19:44:37.000000000 -0400
32532 @@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct
32533 usbip_uerr("cannot find a urb of seqnum %u\n",
32534 pdu->base.seqnum);
32535 usbip_uinfo("max seqnum %d\n",
32536 - atomic_read(&the_controller->seqnum));
32537 + atomic_read_unchecked(&the_controller->seqnum));
32538 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32539 return;
32540 }
32541 diff -urNp linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c
32542 --- linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-05-19 00:06:34.000000000 -0400
32543 +++ linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-13 20:36:25.000000000 -0400
32544 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32545
32546 struct usbctlx_completor {
32547 int (*complete) (struct usbctlx_completor *);
32548 -};
32549 +} __no_const;
32550
32551 static int
32552 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32553 diff -urNp linux-2.6.39.4/drivers/target/target_core_alua.c linux-2.6.39.4/drivers/target/target_core_alua.c
32554 --- linux-2.6.39.4/drivers/target/target_core_alua.c 2011-05-19 00:06:34.000000000 -0400
32555 +++ linux-2.6.39.4/drivers/target/target_core_alua.c 2011-08-05 19:44:37.000000000 -0400
32556 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32557 char path[ALUA_METADATA_PATH_LEN];
32558 int len;
32559
32560 + pax_track_stack();
32561 +
32562 memset(path, 0, ALUA_METADATA_PATH_LEN);
32563
32564 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32565 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32566 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32567 int len;
32568
32569 + pax_track_stack();
32570 +
32571 memset(path, 0, ALUA_METADATA_PATH_LEN);
32572 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32573
32574 diff -urNp linux-2.6.39.4/drivers/target/target_core_cdb.c linux-2.6.39.4/drivers/target/target_core_cdb.c
32575 --- linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-05-19 00:06:34.000000000 -0400
32576 +++ linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-08-05 19:44:37.000000000 -0400
32577 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32578 int length = 0;
32579 unsigned char buf[SE_MODE_PAGE_BUF];
32580
32581 + pax_track_stack();
32582 +
32583 memset(buf, 0, SE_MODE_PAGE_BUF);
32584
32585 switch (cdb[2] & 0x3f) {
32586 diff -urNp linux-2.6.39.4/drivers/target/target_core_configfs.c linux-2.6.39.4/drivers/target/target_core_configfs.c
32587 --- linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-05-19 00:06:34.000000000 -0400
32588 +++ linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-08-05 20:34:06.000000000 -0400
32589 @@ -1280,6 +1280,8 @@ static ssize_t target_core_dev_pr_show_a
32590 ssize_t len = 0;
32591 int reg_count = 0, prf_isid;
32592
32593 + pax_track_stack();
32594 +
32595 if (!(su_dev->se_dev_ptr))
32596 return -ENODEV;
32597
32598 diff -urNp linux-2.6.39.4/drivers/target/target_core_pr.c linux-2.6.39.4/drivers/target/target_core_pr.c
32599 --- linux-2.6.39.4/drivers/target/target_core_pr.c 2011-05-19 00:06:34.000000000 -0400
32600 +++ linux-2.6.39.4/drivers/target/target_core_pr.c 2011-08-05 19:44:37.000000000 -0400
32601 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32602 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32603 u16 tpgt;
32604
32605 + pax_track_stack();
32606 +
32607 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32608 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32609 /*
32610 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32611 ssize_t len = 0;
32612 int reg_count = 0;
32613
32614 + pax_track_stack();
32615 +
32616 memset(buf, 0, pr_aptpl_buf_len);
32617 /*
32618 * Called to clear metadata once APTPL has been deactivated.
32619 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32620 char path[512];
32621 int ret;
32622
32623 + pax_track_stack();
32624 +
32625 memset(iov, 0, sizeof(struct iovec));
32626 memset(path, 0, 512);
32627
32628 diff -urNp linux-2.6.39.4/drivers/target/target_core_tmr.c linux-2.6.39.4/drivers/target/target_core_tmr.c
32629 --- linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-06-03 00:04:14.000000000 -0400
32630 +++ linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-08-05 19:44:37.000000000 -0400
32631 @@ -263,7 +263,7 @@ int core_tmr_lun_reset(
32632 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32633 T_TASK(cmd)->t_task_cdbs,
32634 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32635 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32636 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32637 atomic_read(&T_TASK(cmd)->t_transport_active),
32638 atomic_read(&T_TASK(cmd)->t_transport_stop),
32639 atomic_read(&T_TASK(cmd)->t_transport_sent));
32640 @@ -305,7 +305,7 @@ int core_tmr_lun_reset(
32641 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32642 " task: %p, t_fe_count: %d dev: %p\n", task,
32643 fe_count, dev);
32644 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32645 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32646 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32647 flags);
32648 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32649 @@ -315,7 +315,7 @@ int core_tmr_lun_reset(
32650 }
32651 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32652 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32653 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32654 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32655 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32656 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32657
32658 diff -urNp linux-2.6.39.4/drivers/target/target_core_transport.c linux-2.6.39.4/drivers/target/target_core_transport.c
32659 --- linux-2.6.39.4/drivers/target/target_core_transport.c 2011-06-03 00:04:14.000000000 -0400
32660 +++ linux-2.6.39.4/drivers/target/target_core_transport.c 2011-08-05 19:44:37.000000000 -0400
32661 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32662
32663 dev->queue_depth = dev_limits->queue_depth;
32664 atomic_set(&dev->depth_left, dev->queue_depth);
32665 - atomic_set(&dev->dev_ordered_id, 0);
32666 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32667
32668 se_dev_set_default_attribs(dev, dev_limits);
32669
32670 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32671 * Used to determine when ORDERED commands should go from
32672 * Dormant to Active status.
32673 */
32674 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32675 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32676 smp_mb__after_atomic_inc();
32677 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32678 cmd->se_ordered_id, cmd->sam_task_attr,
32679 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32680 " t_transport_active: %d t_transport_stop: %d"
32681 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32682 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32683 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32684 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32685 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32686 atomic_read(&T_TASK(cmd)->t_transport_active),
32687 atomic_read(&T_TASK(cmd)->t_transport_stop),
32688 @@ -2673,9 +2673,9 @@ check_depth:
32689 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32690 atomic_set(&task->task_active, 1);
32691 atomic_set(&task->task_sent, 1);
32692 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32693 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32694
32695 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32696 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32697 T_TASK(cmd)->t_task_cdbs)
32698 atomic_set(&cmd->transport_sent, 1);
32699
32700 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32701 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32702 }
32703 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32704 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32705 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32706 goto remove;
32707
32708 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32709 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32710 {
32711 int ret = 0;
32712
32713 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32714 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32715 if (!(send_status) ||
32716 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32717 return 1;
32718 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32719 */
32720 if (cmd->data_direction == DMA_TO_DEVICE) {
32721 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32722 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32723 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32724 smp_mb__after_atomic_inc();
32725 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32726 transport_new_cmd_failure(cmd);
32727 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32728 CMD_TFO(cmd)->get_task_tag(cmd),
32729 T_TASK(cmd)->t_task_cdbs,
32730 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32731 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32732 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32733 atomic_read(&T_TASK(cmd)->t_transport_active),
32734 atomic_read(&T_TASK(cmd)->t_transport_stop),
32735 atomic_read(&T_TASK(cmd)->t_transport_sent));
32736 diff -urNp linux-2.6.39.4/drivers/telephony/ixj.c linux-2.6.39.4/drivers/telephony/ixj.c
32737 --- linux-2.6.39.4/drivers/telephony/ixj.c 2011-05-19 00:06:34.000000000 -0400
32738 +++ linux-2.6.39.4/drivers/telephony/ixj.c 2011-08-05 19:44:37.000000000 -0400
32739 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32740 bool mContinue;
32741 char *pIn, *pOut;
32742
32743 + pax_track_stack();
32744 +
32745 if (!SCI_Prepare(j))
32746 return 0;
32747
32748 diff -urNp linux-2.6.39.4/drivers/tty/hvc/hvcs.c linux-2.6.39.4/drivers/tty/hvc/hvcs.c
32749 --- linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-05-19 00:06:34.000000000 -0400
32750 +++ linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-08-05 19:44:37.000000000 -0400
32751 @@ -83,6 +83,7 @@
32752 #include <asm/hvcserver.h>
32753 #include <asm/uaccess.h>
32754 #include <asm/vio.h>
32755 +#include <asm/local.h>
32756
32757 /*
32758 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32759 @@ -270,7 +271,7 @@ struct hvcs_struct {
32760 unsigned int index;
32761
32762 struct tty_struct *tty;
32763 - int open_count;
32764 + local_t open_count;
32765
32766 /*
32767 * Used to tell the driver kernel_thread what operations need to take
32768 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32769
32770 spin_lock_irqsave(&hvcsd->lock, flags);
32771
32772 - if (hvcsd->open_count > 0) {
32773 + if (local_read(&hvcsd->open_count) > 0) {
32774 spin_unlock_irqrestore(&hvcsd->lock, flags);
32775 printk(KERN_INFO "HVCS: vterm state unchanged. "
32776 "The hvcs device node is still in use.\n");
32777 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32778 if ((retval = hvcs_partner_connect(hvcsd)))
32779 goto error_release;
32780
32781 - hvcsd->open_count = 1;
32782 + local_set(&hvcsd->open_count, 1);
32783 hvcsd->tty = tty;
32784 tty->driver_data = hvcsd;
32785
32786 @@ -1179,7 +1180,7 @@ fast_open:
32787
32788 spin_lock_irqsave(&hvcsd->lock, flags);
32789 kref_get(&hvcsd->kref);
32790 - hvcsd->open_count++;
32791 + local_inc(&hvcsd->open_count);
32792 hvcsd->todo_mask |= HVCS_SCHED_READ;
32793 spin_unlock_irqrestore(&hvcsd->lock, flags);
32794
32795 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32796 hvcsd = tty->driver_data;
32797
32798 spin_lock_irqsave(&hvcsd->lock, flags);
32799 - if (--hvcsd->open_count == 0) {
32800 + if (local_dec_and_test(&hvcsd->open_count)) {
32801
32802 vio_disable_interrupts(hvcsd->vdev);
32803
32804 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32805 free_irq(irq, hvcsd);
32806 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32807 return;
32808 - } else if (hvcsd->open_count < 0) {
32809 + } else if (local_read(&hvcsd->open_count) < 0) {
32810 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32811 " is missmanaged.\n",
32812 - hvcsd->vdev->unit_address, hvcsd->open_count);
32813 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32814 }
32815
32816 spin_unlock_irqrestore(&hvcsd->lock, flags);
32817 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32818
32819 spin_lock_irqsave(&hvcsd->lock, flags);
32820 /* Preserve this so that we know how many kref refs to put */
32821 - temp_open_count = hvcsd->open_count;
32822 + temp_open_count = local_read(&hvcsd->open_count);
32823
32824 /*
32825 * Don't kref put inside the spinlock because the destruction
32826 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32827 hvcsd->tty->driver_data = NULL;
32828 hvcsd->tty = NULL;
32829
32830 - hvcsd->open_count = 0;
32831 + local_set(&hvcsd->open_count, 0);
32832
32833 /* This will drop any buffered data on the floor which is OK in a hangup
32834 * scenario. */
32835 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32836 * the middle of a write operation? This is a crummy place to do this
32837 * but we want to keep it all in the spinlock.
32838 */
32839 - if (hvcsd->open_count <= 0) {
32840 + if (local_read(&hvcsd->open_count) <= 0) {
32841 spin_unlock_irqrestore(&hvcsd->lock, flags);
32842 return -ENODEV;
32843 }
32844 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32845 {
32846 struct hvcs_struct *hvcsd = tty->driver_data;
32847
32848 - if (!hvcsd || hvcsd->open_count <= 0)
32849 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32850 return 0;
32851
32852 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32853 diff -urNp linux-2.6.39.4/drivers/tty/ipwireless/tty.c linux-2.6.39.4/drivers/tty/ipwireless/tty.c
32854 --- linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-05-19 00:06:34.000000000 -0400
32855 +++ linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-08-05 19:44:37.000000000 -0400
32856 @@ -29,6 +29,7 @@
32857 #include <linux/tty_driver.h>
32858 #include <linux/tty_flip.h>
32859 #include <linux/uaccess.h>
32860 +#include <asm/local.h>
32861
32862 #include "tty.h"
32863 #include "network.h"
32864 @@ -51,7 +52,7 @@ struct ipw_tty {
32865 int tty_type;
32866 struct ipw_network *network;
32867 struct tty_struct *linux_tty;
32868 - int open_count;
32869 + local_t open_count;
32870 unsigned int control_lines;
32871 struct mutex ipw_tty_mutex;
32872 int tx_bytes_queued;
32873 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32874 mutex_unlock(&tty->ipw_tty_mutex);
32875 return -ENODEV;
32876 }
32877 - if (tty->open_count == 0)
32878 + if (local_read(&tty->open_count) == 0)
32879 tty->tx_bytes_queued = 0;
32880
32881 - tty->open_count++;
32882 + local_inc(&tty->open_count);
32883
32884 tty->linux_tty = linux_tty;
32885 linux_tty->driver_data = tty;
32886 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32887
32888 static void do_ipw_close(struct ipw_tty *tty)
32889 {
32890 - tty->open_count--;
32891 -
32892 - if (tty->open_count == 0) {
32893 + if (local_dec_return(&tty->open_count) == 0) {
32894 struct tty_struct *linux_tty = tty->linux_tty;
32895
32896 if (linux_tty != NULL) {
32897 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32898 return;
32899
32900 mutex_lock(&tty->ipw_tty_mutex);
32901 - if (tty->open_count == 0) {
32902 + if (local_read(&tty->open_count) == 0) {
32903 mutex_unlock(&tty->ipw_tty_mutex);
32904 return;
32905 }
32906 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32907 return;
32908 }
32909
32910 - if (!tty->open_count) {
32911 + if (!local_read(&tty->open_count)) {
32912 mutex_unlock(&tty->ipw_tty_mutex);
32913 return;
32914 }
32915 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32916 return -ENODEV;
32917
32918 mutex_lock(&tty->ipw_tty_mutex);
32919 - if (!tty->open_count) {
32920 + if (!local_read(&tty->open_count)) {
32921 mutex_unlock(&tty->ipw_tty_mutex);
32922 return -EINVAL;
32923 }
32924 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32925 if (!tty)
32926 return -ENODEV;
32927
32928 - if (!tty->open_count)
32929 + if (!local_read(&tty->open_count))
32930 return -EINVAL;
32931
32932 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32933 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32934 if (!tty)
32935 return 0;
32936
32937 - if (!tty->open_count)
32938 + if (!local_read(&tty->open_count))
32939 return 0;
32940
32941 return tty->tx_bytes_queued;
32942 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32943 if (!tty)
32944 return -ENODEV;
32945
32946 - if (!tty->open_count)
32947 + if (!local_read(&tty->open_count))
32948 return -EINVAL;
32949
32950 return get_control_lines(tty);
32951 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32952 if (!tty)
32953 return -ENODEV;
32954
32955 - if (!tty->open_count)
32956 + if (!local_read(&tty->open_count))
32957 return -EINVAL;
32958
32959 return set_control_lines(tty, set, clear);
32960 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32961 if (!tty)
32962 return -ENODEV;
32963
32964 - if (!tty->open_count)
32965 + if (!local_read(&tty->open_count))
32966 return -EINVAL;
32967
32968 /* FIXME: Exactly how is the tty object locked here .. */
32969 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32970 against a parallel ioctl etc */
32971 mutex_lock(&ttyj->ipw_tty_mutex);
32972 }
32973 - while (ttyj->open_count)
32974 + while (local_read(&ttyj->open_count))
32975 do_ipw_close(ttyj);
32976 ipwireless_disassociate_network_ttys(network,
32977 ttyj->channel_idx);
32978 diff -urNp linux-2.6.39.4/drivers/tty/n_gsm.c linux-2.6.39.4/drivers/tty/n_gsm.c
32979 --- linux-2.6.39.4/drivers/tty/n_gsm.c 2011-05-19 00:06:34.000000000 -0400
32980 +++ linux-2.6.39.4/drivers/tty/n_gsm.c 2011-08-05 19:44:37.000000000 -0400
32981 @@ -1588,7 +1588,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32982 return NULL;
32983 spin_lock_init(&dlci->lock);
32984 dlci->fifo = &dlci->_fifo;
32985 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32986 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32987 kfree(dlci);
32988 return NULL;
32989 }
32990 diff -urNp linux-2.6.39.4/drivers/tty/n_tty.c linux-2.6.39.4/drivers/tty/n_tty.c
32991 --- linux-2.6.39.4/drivers/tty/n_tty.c 2011-05-19 00:06:34.000000000 -0400
32992 +++ linux-2.6.39.4/drivers/tty/n_tty.c 2011-08-05 19:44:37.000000000 -0400
32993 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32994 {
32995 *ops = tty_ldisc_N_TTY;
32996 ops->owner = NULL;
32997 - ops->refcount = ops->flags = 0;
32998 + atomic_set(&ops->refcount, 0);
32999 + ops->flags = 0;
33000 }
33001 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
33002 diff -urNp linux-2.6.39.4/drivers/tty/pty.c linux-2.6.39.4/drivers/tty/pty.c
33003 --- linux-2.6.39.4/drivers/tty/pty.c 2011-05-19 00:06:34.000000000 -0400
33004 +++ linux-2.6.39.4/drivers/tty/pty.c 2011-08-05 20:34:06.000000000 -0400
33005 @@ -753,8 +753,10 @@ static void __init unix98_pty_init(void)
33006 register_sysctl_table(pty_root_table);
33007
33008 /* Now create the /dev/ptmx special device */
33009 + pax_open_kernel();
33010 tty_default_fops(&ptmx_fops);
33011 - ptmx_fops.open = ptmx_open;
33012 + *(void **)&ptmx_fops.open = ptmx_open;
33013 + pax_close_kernel();
33014
33015 cdev_init(&ptmx_cdev, &ptmx_fops);
33016 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33017 diff -urNp linux-2.6.39.4/drivers/tty/rocket.c linux-2.6.39.4/drivers/tty/rocket.c
33018 --- linux-2.6.39.4/drivers/tty/rocket.c 2011-05-19 00:06:34.000000000 -0400
33019 +++ linux-2.6.39.4/drivers/tty/rocket.c 2011-08-05 19:44:37.000000000 -0400
33020 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
33021 struct rocket_ports tmp;
33022 int board;
33023
33024 + pax_track_stack();
33025 +
33026 if (!retports)
33027 return -EFAULT;
33028 memset(&tmp, 0, sizeof (tmp));
33029 diff -urNp linux-2.6.39.4/drivers/tty/serial/kgdboc.c linux-2.6.39.4/drivers/tty/serial/kgdboc.c
33030 --- linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-05-19 00:06:34.000000000 -0400
33031 +++ linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-08-05 20:34:06.000000000 -0400
33032 @@ -23,8 +23,9 @@
33033 #define MAX_CONFIG_LEN 40
33034
33035 static struct kgdb_io kgdboc_io_ops;
33036 +static struct kgdb_io kgdboc_io_ops_console;
33037
33038 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
33039 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
33040 static int configured = -1;
33041
33042 static char config[MAX_CONFIG_LEN];
33043 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
33044 kgdboc_unregister_kbd();
33045 if (configured == 1)
33046 kgdb_unregister_io_module(&kgdboc_io_ops);
33047 + else if (configured == 2)
33048 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
33049 }
33050
33051 static int configure_kgdboc(void)
33052 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
33053 int err;
33054 char *cptr = config;
33055 struct console *cons;
33056 + int is_console = 0;
33057
33058 err = kgdboc_option_setup(config);
33059 if (err || !strlen(config) || isspace(config[0]))
33060 goto noconfig;
33061
33062 err = -ENODEV;
33063 - kgdboc_io_ops.is_console = 0;
33064 kgdb_tty_driver = NULL;
33065
33066 kgdboc_use_kms = 0;
33067 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
33068 int idx;
33069 if (cons->device && cons->device(cons, &idx) == p &&
33070 idx == tty_line) {
33071 - kgdboc_io_ops.is_console = 1;
33072 + is_console = 1;
33073 break;
33074 }
33075 cons = cons->next;
33076 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
33077 kgdb_tty_line = tty_line;
33078
33079 do_register:
33080 - err = kgdb_register_io_module(&kgdboc_io_ops);
33081 + if (is_console) {
33082 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
33083 + configured = 2;
33084 + } else {
33085 + err = kgdb_register_io_module(&kgdboc_io_ops);
33086 + configured = 1;
33087 + }
33088 if (err)
33089 goto noconfig;
33090
33091 - configured = 1;
33092 -
33093 return 0;
33094
33095 noconfig:
33096 @@ -212,7 +219,7 @@ noconfig:
33097 static int __init init_kgdboc(void)
33098 {
33099 /* Already configured? */
33100 - if (configured == 1)
33101 + if (configured >= 1)
33102 return 0;
33103
33104 return configure_kgdboc();
33105 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
33106 if (config[len - 1] == '\n')
33107 config[len - 1] = '\0';
33108
33109 - if (configured == 1)
33110 + if (configured >= 1)
33111 cleanup_kgdboc();
33112
33113 /* Go and configure with the new params. */
33114 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33115 .post_exception = kgdboc_post_exp_handler,
33116 };
33117
33118 +static struct kgdb_io kgdboc_io_ops_console = {
33119 + .name = "kgdboc",
33120 + .read_char = kgdboc_get_char,
33121 + .write_char = kgdboc_put_char,
33122 + .pre_exception = kgdboc_pre_exp_handler,
33123 + .post_exception = kgdboc_post_exp_handler,
33124 + .is_console = 1
33125 +};
33126 +
33127 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33128 /* This is only available if kgdboc is a built in for early debugging */
33129 static int __init kgdboc_early_init(char *opt)
33130 diff -urNp linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c
33131 --- linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-05-19 00:06:34.000000000 -0400
33132 +++ linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-08-05 20:34:06.000000000 -0400
33133 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33134 int loop = 1, num, total = 0;
33135 u8 recv_buf[512], *pbuf;
33136
33137 + pax_track_stack();
33138 +
33139 pbuf = recv_buf;
33140 do {
33141 num = max3110_read_multi(max, pbuf);
33142 diff -urNp linux-2.6.39.4/drivers/tty/tty_io.c linux-2.6.39.4/drivers/tty/tty_io.c
33143 --- linux-2.6.39.4/drivers/tty/tty_io.c 2011-05-19 00:06:34.000000000 -0400
33144 +++ linux-2.6.39.4/drivers/tty/tty_io.c 2011-08-05 20:34:06.000000000 -0400
33145 @@ -3200,7 +3200,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33146
33147 void tty_default_fops(struct file_operations *fops)
33148 {
33149 - *fops = tty_fops;
33150 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33151 }
33152
33153 /*
33154 diff -urNp linux-2.6.39.4/drivers/tty/tty_ldisc.c linux-2.6.39.4/drivers/tty/tty_ldisc.c
33155 --- linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-07-09 09:18:51.000000000 -0400
33156 +++ linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-08-05 19:44:37.000000000 -0400
33157 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33158 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33159 struct tty_ldisc_ops *ldo = ld->ops;
33160
33161 - ldo->refcount--;
33162 + atomic_dec(&ldo->refcount);
33163 module_put(ldo->owner);
33164 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33165
33166 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33167 spin_lock_irqsave(&tty_ldisc_lock, flags);
33168 tty_ldiscs[disc] = new_ldisc;
33169 new_ldisc->num = disc;
33170 - new_ldisc->refcount = 0;
33171 + atomic_set(&new_ldisc->refcount, 0);
33172 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33173
33174 return ret;
33175 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33176 return -EINVAL;
33177
33178 spin_lock_irqsave(&tty_ldisc_lock, flags);
33179 - if (tty_ldiscs[disc]->refcount)
33180 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33181 ret = -EBUSY;
33182 else
33183 tty_ldiscs[disc] = NULL;
33184 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33185 if (ldops) {
33186 ret = ERR_PTR(-EAGAIN);
33187 if (try_module_get(ldops->owner)) {
33188 - ldops->refcount++;
33189 + atomic_inc(&ldops->refcount);
33190 ret = ldops;
33191 }
33192 }
33193 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33194 unsigned long flags;
33195
33196 spin_lock_irqsave(&tty_ldisc_lock, flags);
33197 - ldops->refcount--;
33198 + atomic_dec(&ldops->refcount);
33199 module_put(ldops->owner);
33200 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33201 }
33202 diff -urNp linux-2.6.39.4/drivers/tty/vt/keyboard.c linux-2.6.39.4/drivers/tty/vt/keyboard.c
33203 --- linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-05-19 00:06:34.000000000 -0400
33204 +++ linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-08-05 19:44:37.000000000 -0400
33205 @@ -658,6 +658,16 @@ static void k_spec(struct vc_data *vc, u
33206 kbd->kbdmode == VC_OFF) &&
33207 value != KVAL(K_SAK))
33208 return; /* SAK is allowed even in raw mode */
33209 +
33210 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33211 + {
33212 + void *func = fn_handler[value];
33213 + if (func == fn_show_state || func == fn_show_ptregs ||
33214 + func == fn_show_mem)
33215 + return;
33216 + }
33217 +#endif
33218 +
33219 fn_handler[value](vc);
33220 }
33221
33222 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt.c linux-2.6.39.4/drivers/tty/vt/vt.c
33223 --- linux-2.6.39.4/drivers/tty/vt/vt.c 2011-05-19 00:06:34.000000000 -0400
33224 +++ linux-2.6.39.4/drivers/tty/vt/vt.c 2011-08-05 19:44:37.000000000 -0400
33225 @@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33226
33227 static void notify_write(struct vc_data *vc, unsigned int unicode)
33228 {
33229 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33230 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33231 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33232 }
33233
33234 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c
33235 --- linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-05-19 00:06:34.000000000 -0400
33236 +++ linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-08-05 19:44:37.000000000 -0400
33237 @@ -209,9 +209,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33238 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33239 return -EFAULT;
33240
33241 - if (!capable(CAP_SYS_TTY_CONFIG))
33242 - perm = 0;
33243 -
33244 switch (cmd) {
33245 case KDGKBENT:
33246 key_map = key_maps[s];
33247 @@ -223,6 +220,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33248 val = (i ? K_HOLE : K_NOSUCHMAP);
33249 return put_user(val, &user_kbe->kb_value);
33250 case KDSKBENT:
33251 + if (!capable(CAP_SYS_TTY_CONFIG))
33252 + perm = 0;
33253 +
33254 if (!perm)
33255 return -EPERM;
33256 if (!i && v == K_NOSUCHMAP) {
33257 @@ -324,9 +324,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33258 int i, j, k;
33259 int ret;
33260
33261 - if (!capable(CAP_SYS_TTY_CONFIG))
33262 - perm = 0;
33263 -
33264 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33265 if (!kbs) {
33266 ret = -ENOMEM;
33267 @@ -360,6 +357,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33268 kfree(kbs);
33269 return ((p && *p) ? -EOVERFLOW : 0);
33270 case KDSKBSENT:
33271 + if (!capable(CAP_SYS_TTY_CONFIG))
33272 + perm = 0;
33273 +
33274 if (!perm) {
33275 ret = -EPERM;
33276 goto reterr;
33277 diff -urNp linux-2.6.39.4/drivers/uio/uio.c linux-2.6.39.4/drivers/uio/uio.c
33278 --- linux-2.6.39.4/drivers/uio/uio.c 2011-05-19 00:06:34.000000000 -0400
33279 +++ linux-2.6.39.4/drivers/uio/uio.c 2011-08-05 19:44:37.000000000 -0400
33280 @@ -25,6 +25,7 @@
33281 #include <linux/kobject.h>
33282 #include <linux/cdev.h>
33283 #include <linux/uio_driver.h>
33284 +#include <asm/local.h>
33285
33286 #define UIO_MAX_DEVICES (1U << MINORBITS)
33287
33288 @@ -32,10 +33,10 @@ struct uio_device {
33289 struct module *owner;
33290 struct device *dev;
33291 int minor;
33292 - atomic_t event;
33293 + atomic_unchecked_t event;
33294 struct fasync_struct *async_queue;
33295 wait_queue_head_t wait;
33296 - int vma_count;
33297 + local_t vma_count;
33298 struct uio_info *info;
33299 struct kobject *map_dir;
33300 struct kobject *portio_dir;
33301 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33302 struct device_attribute *attr, char *buf)
33303 {
33304 struct uio_device *idev = dev_get_drvdata(dev);
33305 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33306 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33307 }
33308
33309 static struct device_attribute uio_class_attributes[] = {
33310 @@ -402,7 +403,7 @@ void uio_event_notify(struct uio_info *i
33311 {
33312 struct uio_device *idev = info->uio_dev;
33313
33314 - atomic_inc(&idev->event);
33315 + atomic_inc_unchecked(&idev->event);
33316 wake_up_interruptible(&idev->wait);
33317 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33318 }
33319 @@ -455,7 +456,7 @@ static int uio_open(struct inode *inode,
33320 }
33321
33322 listener->dev = idev;
33323 - listener->event_count = atomic_read(&idev->event);
33324 + listener->event_count = atomic_read_unchecked(&idev->event);
33325 filep->private_data = listener;
33326
33327 if (idev->info->open) {
33328 @@ -506,7 +507,7 @@ static unsigned int uio_poll(struct file
33329 return -EIO;
33330
33331 poll_wait(filep, &idev->wait, wait);
33332 - if (listener->event_count != atomic_read(&idev->event))
33333 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33334 return POLLIN | POLLRDNORM;
33335 return 0;
33336 }
33337 @@ -531,7 +532,7 @@ static ssize_t uio_read(struct file *fil
33338 do {
33339 set_current_state(TASK_INTERRUPTIBLE);
33340
33341 - event_count = atomic_read(&idev->event);
33342 + event_count = atomic_read_unchecked(&idev->event);
33343 if (event_count != listener->event_count) {
33344 if (copy_to_user(buf, &event_count, count))
33345 retval = -EFAULT;
33346 @@ -602,13 +603,13 @@ static int uio_find_mem_index(struct vm_
33347 static void uio_vma_open(struct vm_area_struct *vma)
33348 {
33349 struct uio_device *idev = vma->vm_private_data;
33350 - idev->vma_count++;
33351 + local_inc(&idev->vma_count);
33352 }
33353
33354 static void uio_vma_close(struct vm_area_struct *vma)
33355 {
33356 struct uio_device *idev = vma->vm_private_data;
33357 - idev->vma_count--;
33358 + local_dec(&idev->vma_count);
33359 }
33360
33361 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33362 @@ -819,7 +820,7 @@ int __uio_register_device(struct module
33363 idev->owner = owner;
33364 idev->info = info;
33365 init_waitqueue_head(&idev->wait);
33366 - atomic_set(&idev->event, 0);
33367 + atomic_set_unchecked(&idev->event, 0);
33368
33369 ret = uio_get_minor(idev);
33370 if (ret)
33371 diff -urNp linux-2.6.39.4/drivers/usb/atm/cxacru.c linux-2.6.39.4/drivers/usb/atm/cxacru.c
33372 --- linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-05-19 00:06:34.000000000 -0400
33373 +++ linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-08-05 19:44:37.000000000 -0400
33374 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33375 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33376 if (ret < 2)
33377 return -EINVAL;
33378 - if (index < 0 || index > 0x7f)
33379 + if (index > 0x7f)
33380 return -EINVAL;
33381 pos += tmp;
33382
33383 diff -urNp linux-2.6.39.4/drivers/usb/atm/usbatm.c linux-2.6.39.4/drivers/usb/atm/usbatm.c
33384 --- linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-05-19 00:06:34.000000000 -0400
33385 +++ linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-08-05 19:44:37.000000000 -0400
33386 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33387 if (printk_ratelimit())
33388 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33389 __func__, vpi, vci);
33390 - atomic_inc(&vcc->stats->rx_err);
33391 + atomic_inc_unchecked(&vcc->stats->rx_err);
33392 return;
33393 }
33394
33395 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33396 if (length > ATM_MAX_AAL5_PDU) {
33397 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33398 __func__, length, vcc);
33399 - atomic_inc(&vcc->stats->rx_err);
33400 + atomic_inc_unchecked(&vcc->stats->rx_err);
33401 goto out;
33402 }
33403
33404 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33405 if (sarb->len < pdu_length) {
33406 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33407 __func__, pdu_length, sarb->len, vcc);
33408 - atomic_inc(&vcc->stats->rx_err);
33409 + atomic_inc_unchecked(&vcc->stats->rx_err);
33410 goto out;
33411 }
33412
33413 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33414 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33415 __func__, vcc);
33416 - atomic_inc(&vcc->stats->rx_err);
33417 + atomic_inc_unchecked(&vcc->stats->rx_err);
33418 goto out;
33419 }
33420
33421 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33422 if (printk_ratelimit())
33423 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33424 __func__, length);
33425 - atomic_inc(&vcc->stats->rx_drop);
33426 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33427 goto out;
33428 }
33429
33430 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33431
33432 vcc->push(vcc, skb);
33433
33434 - atomic_inc(&vcc->stats->rx);
33435 + atomic_inc_unchecked(&vcc->stats->rx);
33436 out:
33437 skb_trim(sarb, 0);
33438 }
33439 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33440 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33441
33442 usbatm_pop(vcc, skb);
33443 - atomic_inc(&vcc->stats->tx);
33444 + atomic_inc_unchecked(&vcc->stats->tx);
33445
33446 skb = skb_dequeue(&instance->sndqueue);
33447 }
33448 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33449 if (!left--)
33450 return sprintf(page,
33451 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33452 - atomic_read(&atm_dev->stats.aal5.tx),
33453 - atomic_read(&atm_dev->stats.aal5.tx_err),
33454 - atomic_read(&atm_dev->stats.aal5.rx),
33455 - atomic_read(&atm_dev->stats.aal5.rx_err),
33456 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33457 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33458 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33459 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33460 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33461 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33462
33463 if (!left--) {
33464 if (instance->disconnected)
33465 diff -urNp linux-2.6.39.4/drivers/usb/core/devices.c linux-2.6.39.4/drivers/usb/core/devices.c
33466 --- linux-2.6.39.4/drivers/usb/core/devices.c 2011-05-19 00:06:34.000000000 -0400
33467 +++ linux-2.6.39.4/drivers/usb/core/devices.c 2011-08-05 19:44:37.000000000 -0400
33468 @@ -126,7 +126,7 @@ static const char *format_endpt =
33469 * time it gets called.
33470 */
33471 static struct device_connect_event {
33472 - atomic_t count;
33473 + atomic_unchecked_t count;
33474 wait_queue_head_t wait;
33475 } device_event = {
33476 .count = ATOMIC_INIT(1),
33477 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33478
33479 void usbfs_conn_disc_event(void)
33480 {
33481 - atomic_add(2, &device_event.count);
33482 + atomic_add_unchecked(2, &device_event.count);
33483 wake_up(&device_event.wait);
33484 }
33485
33486 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33487
33488 poll_wait(file, &device_event.wait, wait);
33489
33490 - event_count = atomic_read(&device_event.count);
33491 + event_count = atomic_read_unchecked(&device_event.count);
33492 if (file->f_version != event_count) {
33493 file->f_version = event_count;
33494 return POLLIN | POLLRDNORM;
33495 diff -urNp linux-2.6.39.4/drivers/usb/core/message.c linux-2.6.39.4/drivers/usb/core/message.c
33496 --- linux-2.6.39.4/drivers/usb/core/message.c 2011-07-09 09:18:51.000000000 -0400
33497 +++ linux-2.6.39.4/drivers/usb/core/message.c 2011-08-05 19:44:37.000000000 -0400
33498 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33499 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33500 if (buf) {
33501 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33502 - if (len > 0) {
33503 - smallbuf = kmalloc(++len, GFP_NOIO);
33504 + if (len++ > 0) {
33505 + smallbuf = kmalloc(len, GFP_NOIO);
33506 if (!smallbuf)
33507 return buf;
33508 memcpy(smallbuf, buf, len);
33509 diff -urNp linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c
33510 --- linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-05-19 00:06:34.000000000 -0400
33511 +++ linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-08-05 20:34:06.000000000 -0400
33512 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33513
33514 #ifdef CONFIG_KGDB
33515 static struct kgdb_io kgdbdbgp_io_ops;
33516 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33517 +static struct kgdb_io kgdbdbgp_io_ops_console;
33518 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33519 #else
33520 #define dbgp_kgdb_mode (0)
33521 #endif
33522 @@ -1032,6 +1033,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33523 .write_char = kgdbdbgp_write_char,
33524 };
33525
33526 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33527 + .name = "kgdbdbgp",
33528 + .read_char = kgdbdbgp_read_char,
33529 + .write_char = kgdbdbgp_write_char,
33530 + .is_console = 1
33531 +};
33532 +
33533 static int kgdbdbgp_wait_time;
33534
33535 static int __init kgdbdbgp_parse_config(char *str)
33536 @@ -1047,8 +1055,10 @@ static int __init kgdbdbgp_parse_config(
33537 ptr++;
33538 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33539 }
33540 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33541 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33542 + if (early_dbgp_console.index != -1)
33543 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33544 + else
33545 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33546
33547 return 0;
33548 }
33549 diff -urNp linux-2.6.39.4/drivers/usb/host/xhci-mem.c linux-2.6.39.4/drivers/usb/host/xhci-mem.c
33550 --- linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-06-25 12:55:23.000000000 -0400
33551 +++ linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-08-05 19:44:37.000000000 -0400
33552 @@ -1680,6 +1680,8 @@ static int xhci_check_trb_in_td_math(str
33553 unsigned int num_tests;
33554 int i, ret;
33555
33556 + pax_track_stack();
33557 +
33558 num_tests = ARRAY_SIZE(simple_test_vector);
33559 for (i = 0; i < num_tests; i++) {
33560 ret = xhci_test_trb_in_td(xhci,
33561 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h
33562 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-05-19 00:06:34.000000000 -0400
33563 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-08-05 19:44:37.000000000 -0400
33564 @@ -192,7 +192,7 @@ struct wahc {
33565 struct list_head xfer_delayed_list;
33566 spinlock_t xfer_list_lock;
33567 struct work_struct xfer_work;
33568 - atomic_t xfer_id_count;
33569 + atomic_unchecked_t xfer_id_count;
33570 };
33571
33572
33573 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33574 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33575 spin_lock_init(&wa->xfer_list_lock);
33576 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33577 - atomic_set(&wa->xfer_id_count, 1);
33578 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33579 }
33580
33581 /**
33582 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c
33583 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-05-19 00:06:34.000000000 -0400
33584 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-05 19:44:37.000000000 -0400
33585 @@ -294,7 +294,7 @@ out:
33586 */
33587 static void wa_xfer_id_init(struct wa_xfer *xfer)
33588 {
33589 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33590 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33591 }
33592
33593 /*
33594 diff -urNp linux-2.6.39.4/drivers/vhost/vhost.c linux-2.6.39.4/drivers/vhost/vhost.c
33595 --- linux-2.6.39.4/drivers/vhost/vhost.c 2011-05-19 00:06:34.000000000 -0400
33596 +++ linux-2.6.39.4/drivers/vhost/vhost.c 2011-08-05 19:44:37.000000000 -0400
33597 @@ -580,7 +580,7 @@ static int init_used(struct vhost_virtqu
33598 return get_user(vq->last_used_idx, &used->idx);
33599 }
33600
33601 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33602 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33603 {
33604 struct file *eventfp, *filep = NULL,
33605 *pollstart = NULL, *pollstop = NULL;
33606 diff -urNp linux-2.6.39.4/drivers/video/fbcmap.c linux-2.6.39.4/drivers/video/fbcmap.c
33607 --- linux-2.6.39.4/drivers/video/fbcmap.c 2011-05-19 00:06:34.000000000 -0400
33608 +++ linux-2.6.39.4/drivers/video/fbcmap.c 2011-08-05 19:44:37.000000000 -0400
33609 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33610 rc = -ENODEV;
33611 goto out;
33612 }
33613 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33614 - !info->fbops->fb_setcmap)) {
33615 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33616 rc = -EINVAL;
33617 goto out1;
33618 }
33619 diff -urNp linux-2.6.39.4/drivers/video/fbmem.c linux-2.6.39.4/drivers/video/fbmem.c
33620 --- linux-2.6.39.4/drivers/video/fbmem.c 2011-05-19 00:06:34.000000000 -0400
33621 +++ linux-2.6.39.4/drivers/video/fbmem.c 2011-08-05 19:44:37.000000000 -0400
33622 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33623 image->dx += image->width + 8;
33624 }
33625 } else if (rotate == FB_ROTATE_UD) {
33626 - for (x = 0; x < num && image->dx >= 0; x++) {
33627 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33628 info->fbops->fb_imageblit(info, image);
33629 image->dx -= image->width + 8;
33630 }
33631 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33632 image->dy += image->height + 8;
33633 }
33634 } else if (rotate == FB_ROTATE_CCW) {
33635 - for (x = 0; x < num && image->dy >= 0; x++) {
33636 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33637 info->fbops->fb_imageblit(info, image);
33638 image->dy -= image->height + 8;
33639 }
33640 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33641 int flags = info->flags;
33642 int ret = 0;
33643
33644 + pax_track_stack();
33645 +
33646 if (var->activate & FB_ACTIVATE_INV_MODE) {
33647 struct fb_videomode mode1, mode2;
33648
33649 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33650 void __user *argp = (void __user *)arg;
33651 long ret = 0;
33652
33653 + pax_track_stack();
33654 +
33655 switch (cmd) {
33656 case FBIOGET_VSCREENINFO:
33657 if (!lock_fb_info(info))
33658 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33659 return -EFAULT;
33660 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33661 return -EINVAL;
33662 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33663 + if (con2fb.framebuffer >= FB_MAX)
33664 return -EINVAL;
33665 if (!registered_fb[con2fb.framebuffer])
33666 request_module("fb%d", con2fb.framebuffer);
33667 diff -urNp linux-2.6.39.4/drivers/video/i810/i810_accel.c linux-2.6.39.4/drivers/video/i810/i810_accel.c
33668 --- linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-05-19 00:06:34.000000000 -0400
33669 +++ linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-08-05 19:44:37.000000000 -0400
33670 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33671 }
33672 }
33673 printk("ringbuffer lockup!!!\n");
33674 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33675 i810_report_error(mmio);
33676 par->dev_flags |= LOCKUP;
33677 info->pixmap.scan_align = 1;
33678 diff -urNp linux-2.6.39.4/drivers/video/udlfb.c linux-2.6.39.4/drivers/video/udlfb.c
33679 --- linux-2.6.39.4/drivers/video/udlfb.c 2011-05-19 00:06:34.000000000 -0400
33680 +++ linux-2.6.39.4/drivers/video/udlfb.c 2011-08-05 19:44:37.000000000 -0400
33681 @@ -584,11 +584,11 @@ int dlfb_handle_damage(struct dlfb_data
33682 dlfb_urb_completion(urb);
33683
33684 error:
33685 - atomic_add(bytes_sent, &dev->bytes_sent);
33686 - atomic_add(bytes_identical, &dev->bytes_identical);
33687 - atomic_add(width*height*2, &dev->bytes_rendered);
33688 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33689 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33690 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33691 end_cycles = get_cycles();
33692 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33693 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33694 >> 10)), /* Kcycles */
33695 &dev->cpu_kcycles_used);
33696
33697 @@ -709,11 +709,11 @@ static void dlfb_dpy_deferred_io(struct
33698 dlfb_urb_completion(urb);
33699
33700 error:
33701 - atomic_add(bytes_sent, &dev->bytes_sent);
33702 - atomic_add(bytes_identical, &dev->bytes_identical);
33703 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33704 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33705 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33706 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33707 end_cycles = get_cycles();
33708 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33709 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33710 >> 10)), /* Kcycles */
33711 &dev->cpu_kcycles_used);
33712 }
33713 @@ -1301,7 +1301,7 @@ static ssize_t metrics_bytes_rendered_sh
33714 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33715 struct dlfb_data *dev = fb_info->par;
33716 return snprintf(buf, PAGE_SIZE, "%u\n",
33717 - atomic_read(&dev->bytes_rendered));
33718 + atomic_read_unchecked(&dev->bytes_rendered));
33719 }
33720
33721 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33722 @@ -1309,7 +1309,7 @@ static ssize_t metrics_bytes_identical_s
33723 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33724 struct dlfb_data *dev = fb_info->par;
33725 return snprintf(buf, PAGE_SIZE, "%u\n",
33726 - atomic_read(&dev->bytes_identical));
33727 + atomic_read_unchecked(&dev->bytes_identical));
33728 }
33729
33730 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33731 @@ -1317,7 +1317,7 @@ static ssize_t metrics_bytes_sent_show(s
33732 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33733 struct dlfb_data *dev = fb_info->par;
33734 return snprintf(buf, PAGE_SIZE, "%u\n",
33735 - atomic_read(&dev->bytes_sent));
33736 + atomic_read_unchecked(&dev->bytes_sent));
33737 }
33738
33739 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33740 @@ -1325,7 +1325,7 @@ static ssize_t metrics_cpu_kcycles_used_
33741 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33742 struct dlfb_data *dev = fb_info->par;
33743 return snprintf(buf, PAGE_SIZE, "%u\n",
33744 - atomic_read(&dev->cpu_kcycles_used));
33745 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33746 }
33747
33748 static ssize_t edid_show(
33749 @@ -1382,10 +1382,10 @@ static ssize_t metrics_reset_store(struc
33750 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33751 struct dlfb_data *dev = fb_info->par;
33752
33753 - atomic_set(&dev->bytes_rendered, 0);
33754 - atomic_set(&dev->bytes_identical, 0);
33755 - atomic_set(&dev->bytes_sent, 0);
33756 - atomic_set(&dev->cpu_kcycles_used, 0);
33757 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33758 + atomic_set_unchecked(&dev->bytes_identical, 0);
33759 + atomic_set_unchecked(&dev->bytes_sent, 0);
33760 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33761
33762 return count;
33763 }
33764 diff -urNp linux-2.6.39.4/drivers/video/uvesafb.c linux-2.6.39.4/drivers/video/uvesafb.c
33765 --- linux-2.6.39.4/drivers/video/uvesafb.c 2011-05-19 00:06:34.000000000 -0400
33766 +++ linux-2.6.39.4/drivers/video/uvesafb.c 2011-08-05 20:34:06.000000000 -0400
33767 @@ -19,6 +19,7 @@
33768 #include <linux/io.h>
33769 #include <linux/mutex.h>
33770 #include <linux/slab.h>
33771 +#include <linux/moduleloader.h>
33772 #include <video/edid.h>
33773 #include <video/uvesafb.h>
33774 #ifdef CONFIG_X86
33775 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33776 NULL,
33777 };
33778
33779 - return call_usermodehelper(v86d_path, argv, envp, 1);
33780 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33781 }
33782
33783 /*
33784 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33785 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33786 par->pmi_setpal = par->ypan = 0;
33787 } else {
33788 +
33789 +#ifdef CONFIG_PAX_KERNEXEC
33790 +#ifdef CONFIG_MODULES
33791 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33792 +#endif
33793 + if (!par->pmi_code) {
33794 + par->pmi_setpal = par->ypan = 0;
33795 + return 0;
33796 + }
33797 +#endif
33798 +
33799 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33800 + task->t.regs.edi);
33801 +
33802 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33803 + pax_open_kernel();
33804 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33805 + pax_close_kernel();
33806 +
33807 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33808 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33809 +#else
33810 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33811 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33812 +#endif
33813 +
33814 printk(KERN_INFO "uvesafb: protected mode interface info at "
33815 "%04x:%04x\n",
33816 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33817 @@ -1821,6 +1844,11 @@ out:
33818 if (par->vbe_modes)
33819 kfree(par->vbe_modes);
33820
33821 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33822 + if (par->pmi_code)
33823 + module_free_exec(NULL, par->pmi_code);
33824 +#endif
33825 +
33826 framebuffer_release(info);
33827 return err;
33828 }
33829 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33830 kfree(par->vbe_state_orig);
33831 if (par->vbe_state_saved)
33832 kfree(par->vbe_state_saved);
33833 +
33834 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33835 + if (par->pmi_code)
33836 + module_free_exec(NULL, par->pmi_code);
33837 +#endif
33838 +
33839 }
33840
33841 framebuffer_release(info);
33842 diff -urNp linux-2.6.39.4/drivers/video/vesafb.c linux-2.6.39.4/drivers/video/vesafb.c
33843 --- linux-2.6.39.4/drivers/video/vesafb.c 2011-05-19 00:06:34.000000000 -0400
33844 +++ linux-2.6.39.4/drivers/video/vesafb.c 2011-08-05 20:34:06.000000000 -0400
33845 @@ -9,6 +9,7 @@
33846 */
33847
33848 #include <linux/module.h>
33849 +#include <linux/moduleloader.h>
33850 #include <linux/kernel.h>
33851 #include <linux/errno.h>
33852 #include <linux/string.h>
33853 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33854 static int vram_total __initdata; /* Set total amount of memory */
33855 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33856 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33857 -static void (*pmi_start)(void) __read_mostly;
33858 -static void (*pmi_pal) (void) __read_mostly;
33859 +static void (*pmi_start)(void) __read_only;
33860 +static void (*pmi_pal) (void) __read_only;
33861 static int depth __read_mostly;
33862 static int vga_compat __read_mostly;
33863 /* --------------------------------------------------------------------- */
33864 @@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
33865 unsigned int size_vmode;
33866 unsigned int size_remap;
33867 unsigned int size_total;
33868 + void *pmi_code = NULL;
33869
33870 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33871 return -ENODEV;
33872 @@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
33873 size_remap = size_total;
33874 vesafb_fix.smem_len = size_remap;
33875
33876 -#ifndef __i386__
33877 - screen_info.vesapm_seg = 0;
33878 -#endif
33879 -
33880 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33881 printk(KERN_WARNING
33882 "vesafb: cannot reserve video memory at 0x%lx\n",
33883 @@ -306,9 +304,21 @@ static int __init vesafb_probe(struct pl
33884 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33885 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33886
33887 +#ifdef __i386__
33888 +
33889 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33890 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33891 + if (!pmi_code)
33892 +#elif !defined(CONFIG_PAX_KERNEXEC)
33893 + if (0)
33894 +#endif
33895 +
33896 +#endif
33897 + screen_info.vesapm_seg = 0;
33898 +
33899 if (screen_info.vesapm_seg) {
33900 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33901 - screen_info.vesapm_seg,screen_info.vesapm_off);
33902 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33903 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33904 }
33905
33906 if (screen_info.vesapm_seg < 0xc000)
33907 @@ -316,9 +326,25 @@ static int __init vesafb_probe(struct pl
33908
33909 if (ypan || pmi_setpal) {
33910 unsigned short *pmi_base;
33911 +
33912 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33913 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33914 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33915 +
33916 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33917 + pax_open_kernel();
33918 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33919 +#else
33920 + pmi_code = pmi_base;
33921 +#endif
33922 +
33923 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33924 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33925 +
33926 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33927 + pmi_start = ktva_ktla(pmi_start);
33928 + pmi_pal = ktva_ktla(pmi_pal);
33929 + pax_close_kernel();
33930 +#endif
33931 +
33932 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33933 if (pmi_base[3]) {
33934 printk(KERN_INFO "vesafb: pmi: ports = ");
33935 @@ -487,6 +513,11 @@ static int __init vesafb_probe(struct pl
33936 info->node, info->fix.id);
33937 return 0;
33938 err:
33939 +
33940 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33941 + module_free_exec(NULL, pmi_code);
33942 +#endif
33943 +
33944 if (info->screen_base)
33945 iounmap(info->screen_base);
33946 framebuffer_release(info);
33947 diff -urNp linux-2.6.39.4/drivers/virtio/virtio_balloon.c linux-2.6.39.4/drivers/virtio/virtio_balloon.c
33948 --- linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-05-19 00:06:34.000000000 -0400
33949 +++ linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-08-05 19:44:37.000000000 -0400
33950 @@ -176,6 +176,8 @@ static void update_balloon_stats(struct
33951 struct sysinfo i;
33952 int idx = 0;
33953
33954 + pax_track_stack();
33955 +
33956 all_vm_events(events);
33957 si_meminfo(&i);
33958
33959 diff -urNp linux-2.6.39.4/fs/9p/vfs_inode.c linux-2.6.39.4/fs/9p/vfs_inode.c
33960 --- linux-2.6.39.4/fs/9p/vfs_inode.c 2011-05-19 00:06:34.000000000 -0400
33961 +++ linux-2.6.39.4/fs/9p/vfs_inode.c 2011-08-05 19:44:37.000000000 -0400
33962 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33963 void
33964 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33965 {
33966 - char *s = nd_get_link(nd);
33967 + const char *s = nd_get_link(nd);
33968
33969 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33970 IS_ERR(s) ? "<error>" : s);
33971 diff -urNp linux-2.6.39.4/fs/aio.c linux-2.6.39.4/fs/aio.c
33972 --- linux-2.6.39.4/fs/aio.c 2011-05-19 00:06:34.000000000 -0400
33973 +++ linux-2.6.39.4/fs/aio.c 2011-08-05 19:44:37.000000000 -0400
33974 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33975 size += sizeof(struct io_event) * nr_events;
33976 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33977
33978 - if (nr_pages < 0)
33979 + if (nr_pages <= 0)
33980 return -EINVAL;
33981
33982 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33983 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33984 struct aio_timeout to;
33985 int retry = 0;
33986
33987 + pax_track_stack();
33988 +
33989 /* needed to zero any padding within an entry (there shouldn't be
33990 * any, but C is fun!
33991 */
33992 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33993 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33994 {
33995 ssize_t ret;
33996 + struct iovec iovstack;
33997
33998 #ifdef CONFIG_COMPAT
33999 if (compat)
34000 ret = compat_rw_copy_check_uvector(type,
34001 (struct compat_iovec __user *)kiocb->ki_buf,
34002 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
34003 + kiocb->ki_nbytes, 1, &iovstack,
34004 &kiocb->ki_iovec);
34005 else
34006 #endif
34007 ret = rw_copy_check_uvector(type,
34008 (struct iovec __user *)kiocb->ki_buf,
34009 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
34010 + kiocb->ki_nbytes, 1, &iovstack,
34011 &kiocb->ki_iovec);
34012 if (ret < 0)
34013 goto out;
34014
34015 + if (kiocb->ki_iovec == &iovstack) {
34016 + kiocb->ki_inline_vec = iovstack;
34017 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
34018 + }
34019 kiocb->ki_nr_segs = kiocb->ki_nbytes;
34020 kiocb->ki_cur_seg = 0;
34021 /* ki_nbytes/left now reflect bytes instead of segs */
34022 diff -urNp linux-2.6.39.4/fs/attr.c linux-2.6.39.4/fs/attr.c
34023 --- linux-2.6.39.4/fs/attr.c 2011-05-19 00:06:34.000000000 -0400
34024 +++ linux-2.6.39.4/fs/attr.c 2011-08-05 19:44:37.000000000 -0400
34025 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
34026 unsigned long limit;
34027
34028 limit = rlimit(RLIMIT_FSIZE);
34029 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
34030 if (limit != RLIM_INFINITY && offset > limit)
34031 goto out_sig;
34032 if (offset > inode->i_sb->s_maxbytes)
34033 diff -urNp linux-2.6.39.4/fs/befs/linuxvfs.c linux-2.6.39.4/fs/befs/linuxvfs.c
34034 --- linux-2.6.39.4/fs/befs/linuxvfs.c 2011-05-19 00:06:34.000000000 -0400
34035 +++ linux-2.6.39.4/fs/befs/linuxvfs.c 2011-08-05 19:44:37.000000000 -0400
34036 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
34037 {
34038 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
34039 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
34040 - char *link = nd_get_link(nd);
34041 + const char *link = nd_get_link(nd);
34042 if (!IS_ERR(link))
34043 kfree(link);
34044 }
34045 diff -urNp linux-2.6.39.4/fs/binfmt_aout.c linux-2.6.39.4/fs/binfmt_aout.c
34046 --- linux-2.6.39.4/fs/binfmt_aout.c 2011-05-19 00:06:34.000000000 -0400
34047 +++ linux-2.6.39.4/fs/binfmt_aout.c 2011-08-05 19:44:37.000000000 -0400
34048 @@ -16,6 +16,7 @@
34049 #include <linux/string.h>
34050 #include <linux/fs.h>
34051 #include <linux/file.h>
34052 +#include <linux/security.h>
34053 #include <linux/stat.h>
34054 #include <linux/fcntl.h>
34055 #include <linux/ptrace.h>
34056 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
34057 #endif
34058 # define START_STACK(u) ((void __user *)u.start_stack)
34059
34060 + memset(&dump, 0, sizeof(dump));
34061 +
34062 fs = get_fs();
34063 set_fs(KERNEL_DS);
34064 has_dumped = 1;
34065 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
34066
34067 /* If the size of the dump file exceeds the rlimit, then see what would happen
34068 if we wrote the stack, but not the data area. */
34069 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
34070 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
34071 dump.u_dsize = 0;
34072
34073 /* Make sure we have enough room to write the stack and data areas. */
34074 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
34075 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
34076 dump.u_ssize = 0;
34077
34078 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
34079 rlim = rlimit(RLIMIT_DATA);
34080 if (rlim >= RLIM_INFINITY)
34081 rlim = ~0;
34082 +
34083 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
34084 if (ex.a_data + ex.a_bss > rlim)
34085 return -ENOMEM;
34086
34087 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
34088 install_exec_creds(bprm);
34089 current->flags &= ~PF_FORKNOEXEC;
34090
34091 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34092 + current->mm->pax_flags = 0UL;
34093 +#endif
34094 +
34095 +#ifdef CONFIG_PAX_PAGEEXEC
34096 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
34097 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
34098 +
34099 +#ifdef CONFIG_PAX_EMUTRAMP
34100 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
34101 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
34102 +#endif
34103 +
34104 +#ifdef CONFIG_PAX_MPROTECT
34105 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
34106 + current->mm->pax_flags |= MF_PAX_MPROTECT;
34107 +#endif
34108 +
34109 + }
34110 +#endif
34111 +
34112 if (N_MAGIC(ex) == OMAGIC) {
34113 unsigned long text_addr, map_size;
34114 loff_t pos;
34115 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
34116
34117 down_write(&current->mm->mmap_sem);
34118 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34119 - PROT_READ | PROT_WRITE | PROT_EXEC,
34120 + PROT_READ | PROT_WRITE,
34121 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34122 fd_offset + ex.a_text);
34123 up_write(&current->mm->mmap_sem);
34124 diff -urNp linux-2.6.39.4/fs/binfmt_elf.c linux-2.6.39.4/fs/binfmt_elf.c
34125 --- linux-2.6.39.4/fs/binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
34126 +++ linux-2.6.39.4/fs/binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
34127 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34128 #define elf_core_dump NULL
34129 #endif
34130
34131 +#ifdef CONFIG_PAX_MPROTECT
34132 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34133 +#endif
34134 +
34135 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34136 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34137 #else
34138 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34139 .load_binary = load_elf_binary,
34140 .load_shlib = load_elf_library,
34141 .core_dump = elf_core_dump,
34142 +
34143 +#ifdef CONFIG_PAX_MPROTECT
34144 + .handle_mprotect= elf_handle_mprotect,
34145 +#endif
34146 +
34147 .min_coredump = ELF_EXEC_PAGESIZE,
34148 };
34149
34150 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34151
34152 static int set_brk(unsigned long start, unsigned long end)
34153 {
34154 + unsigned long e = end;
34155 +
34156 start = ELF_PAGEALIGN(start);
34157 end = ELF_PAGEALIGN(end);
34158 if (end > start) {
34159 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34160 if (BAD_ADDR(addr))
34161 return addr;
34162 }
34163 - current->mm->start_brk = current->mm->brk = end;
34164 + current->mm->start_brk = current->mm->brk = e;
34165 return 0;
34166 }
34167
34168 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34169 elf_addr_t __user *u_rand_bytes;
34170 const char *k_platform = ELF_PLATFORM;
34171 const char *k_base_platform = ELF_BASE_PLATFORM;
34172 - unsigned char k_rand_bytes[16];
34173 + u32 k_rand_bytes[4];
34174 int items;
34175 elf_addr_t *elf_info;
34176 int ei_index = 0;
34177 const struct cred *cred = current_cred();
34178 struct vm_area_struct *vma;
34179 + unsigned long saved_auxv[AT_VECTOR_SIZE];
34180 +
34181 + pax_track_stack();
34182
34183 /*
34184 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34185 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34186 * Generate 16 random bytes for userspace PRNG seeding.
34187 */
34188 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34189 - u_rand_bytes = (elf_addr_t __user *)
34190 - STACK_ALLOC(p, sizeof(k_rand_bytes));
34191 + srandom32(k_rand_bytes[0] ^ random32());
34192 + srandom32(k_rand_bytes[1] ^ random32());
34193 + srandom32(k_rand_bytes[2] ^ random32());
34194 + srandom32(k_rand_bytes[3] ^ random32());
34195 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
34196 + u_rand_bytes = (elf_addr_t __user *) p;
34197 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34198 return -EFAULT;
34199
34200 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34201 return -EFAULT;
34202 current->mm->env_end = p;
34203
34204 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34205 +
34206 /* Put the elf_info on the stack in the right place. */
34207 sp = (elf_addr_t __user *)envp + 1;
34208 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34209 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34210 return -EFAULT;
34211 return 0;
34212 }
34213 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34214 {
34215 struct elf_phdr *elf_phdata;
34216 struct elf_phdr *eppnt;
34217 - unsigned long load_addr = 0;
34218 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34219 int load_addr_set = 0;
34220 unsigned long last_bss = 0, elf_bss = 0;
34221 - unsigned long error = ~0UL;
34222 + unsigned long error = -EINVAL;
34223 unsigned long total_size;
34224 int retval, i, size;
34225
34226 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34227 goto out_close;
34228 }
34229
34230 +#ifdef CONFIG_PAX_SEGMEXEC
34231 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34232 + pax_task_size = SEGMEXEC_TASK_SIZE;
34233 +#endif
34234 +
34235 eppnt = elf_phdata;
34236 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34237 if (eppnt->p_type == PT_LOAD) {
34238 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34239 k = load_addr + eppnt->p_vaddr;
34240 if (BAD_ADDR(k) ||
34241 eppnt->p_filesz > eppnt->p_memsz ||
34242 - eppnt->p_memsz > TASK_SIZE ||
34243 - TASK_SIZE - eppnt->p_memsz < k) {
34244 + eppnt->p_memsz > pax_task_size ||
34245 + pax_task_size - eppnt->p_memsz < k) {
34246 error = -ENOMEM;
34247 goto out_close;
34248 }
34249 @@ -528,6 +553,193 @@ out:
34250 return error;
34251 }
34252
34253 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34254 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34255 +{
34256 + unsigned long pax_flags = 0UL;
34257 +
34258 +#ifdef CONFIG_PAX_PAGEEXEC
34259 + if (elf_phdata->p_flags & PF_PAGEEXEC)
34260 + pax_flags |= MF_PAX_PAGEEXEC;
34261 +#endif
34262 +
34263 +#ifdef CONFIG_PAX_SEGMEXEC
34264 + if (elf_phdata->p_flags & PF_SEGMEXEC)
34265 + pax_flags |= MF_PAX_SEGMEXEC;
34266 +#endif
34267 +
34268 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34269 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34270 + if ((__supported_pte_mask & _PAGE_NX))
34271 + pax_flags &= ~MF_PAX_SEGMEXEC;
34272 + else
34273 + pax_flags &= ~MF_PAX_PAGEEXEC;
34274 + }
34275 +#endif
34276 +
34277 +#ifdef CONFIG_PAX_EMUTRAMP
34278 + if (elf_phdata->p_flags & PF_EMUTRAMP)
34279 + pax_flags |= MF_PAX_EMUTRAMP;
34280 +#endif
34281 +
34282 +#ifdef CONFIG_PAX_MPROTECT
34283 + if (elf_phdata->p_flags & PF_MPROTECT)
34284 + pax_flags |= MF_PAX_MPROTECT;
34285 +#endif
34286 +
34287 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34288 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34289 + pax_flags |= MF_PAX_RANDMMAP;
34290 +#endif
34291 +
34292 + return pax_flags;
34293 +}
34294 +#endif
34295 +
34296 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34297 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34298 +{
34299 + unsigned long pax_flags = 0UL;
34300 +
34301 +#ifdef CONFIG_PAX_PAGEEXEC
34302 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34303 + pax_flags |= MF_PAX_PAGEEXEC;
34304 +#endif
34305 +
34306 +#ifdef CONFIG_PAX_SEGMEXEC
34307 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34308 + pax_flags |= MF_PAX_SEGMEXEC;
34309 +#endif
34310 +
34311 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34312 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34313 + if ((__supported_pte_mask & _PAGE_NX))
34314 + pax_flags &= ~MF_PAX_SEGMEXEC;
34315 + else
34316 + pax_flags &= ~MF_PAX_PAGEEXEC;
34317 + }
34318 +#endif
34319 +
34320 +#ifdef CONFIG_PAX_EMUTRAMP
34321 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34322 + pax_flags |= MF_PAX_EMUTRAMP;
34323 +#endif
34324 +
34325 +#ifdef CONFIG_PAX_MPROTECT
34326 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34327 + pax_flags |= MF_PAX_MPROTECT;
34328 +#endif
34329 +
34330 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34331 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34332 + pax_flags |= MF_PAX_RANDMMAP;
34333 +#endif
34334 +
34335 + return pax_flags;
34336 +}
34337 +#endif
34338 +
34339 +#ifdef CONFIG_PAX_EI_PAX
34340 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34341 +{
34342 + unsigned long pax_flags = 0UL;
34343 +
34344 +#ifdef CONFIG_PAX_PAGEEXEC
34345 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34346 + pax_flags |= MF_PAX_PAGEEXEC;
34347 +#endif
34348 +
34349 +#ifdef CONFIG_PAX_SEGMEXEC
34350 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34351 + pax_flags |= MF_PAX_SEGMEXEC;
34352 +#endif
34353 +
34354 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34355 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34356 + if ((__supported_pte_mask & _PAGE_NX))
34357 + pax_flags &= ~MF_PAX_SEGMEXEC;
34358 + else
34359 + pax_flags &= ~MF_PAX_PAGEEXEC;
34360 + }
34361 +#endif
34362 +
34363 +#ifdef CONFIG_PAX_EMUTRAMP
34364 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34365 + pax_flags |= MF_PAX_EMUTRAMP;
34366 +#endif
34367 +
34368 +#ifdef CONFIG_PAX_MPROTECT
34369 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34370 + pax_flags |= MF_PAX_MPROTECT;
34371 +#endif
34372 +
34373 +#ifdef CONFIG_PAX_ASLR
34374 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34375 + pax_flags |= MF_PAX_RANDMMAP;
34376 +#endif
34377 +
34378 + return pax_flags;
34379 +}
34380 +#endif
34381 +
34382 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34383 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34384 +{
34385 + unsigned long pax_flags = 0UL;
34386 +
34387 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34388 + unsigned long i;
34389 + int found_flags = 0;
34390 +#endif
34391 +
34392 +#ifdef CONFIG_PAX_EI_PAX
34393 + pax_flags = pax_parse_ei_pax(elf_ex);
34394 +#endif
34395 +
34396 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34397 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34398 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34399 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34400 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34401 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34402 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34403 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34404 + return -EINVAL;
34405 +
34406 +#ifdef CONFIG_PAX_SOFTMODE
34407 + if (pax_softmode)
34408 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34409 + else
34410 +#endif
34411 +
34412 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34413 + found_flags = 1;
34414 + break;
34415 + }
34416 +#endif
34417 +
34418 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34419 + if (found_flags == 0) {
34420 + struct elf_phdr phdr;
34421 + memset(&phdr, 0, sizeof(phdr));
34422 + phdr.p_flags = PF_NOEMUTRAMP;
34423 +#ifdef CONFIG_PAX_SOFTMODE
34424 + if (pax_softmode)
34425 + pax_flags = pax_parse_softmode(&phdr);
34426 + else
34427 +#endif
34428 + pax_flags = pax_parse_hardmode(&phdr);
34429 + }
34430 +#endif
34431 +
34432 + if (0 > pax_check_flags(&pax_flags))
34433 + return -EINVAL;
34434 +
34435 + current->mm->pax_flags = pax_flags;
34436 + return 0;
34437 +}
34438 +#endif
34439 +
34440 /*
34441 * These are the functions used to load ELF style executables and shared
34442 * libraries. There is no binary dependent code anywhere else.
34443 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34444 {
34445 unsigned int random_variable = 0;
34446
34447 +#ifdef CONFIG_PAX_RANDUSTACK
34448 + if (randomize_va_space)
34449 + return stack_top - current->mm->delta_stack;
34450 +#endif
34451 +
34452 if ((current->flags & PF_RANDOMIZE) &&
34453 !(current->personality & ADDR_NO_RANDOMIZE)) {
34454 random_variable = get_random_int() & STACK_RND_MASK;
34455 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34456 unsigned long load_addr = 0, load_bias = 0;
34457 int load_addr_set = 0;
34458 char * elf_interpreter = NULL;
34459 - unsigned long error;
34460 + unsigned long error = 0;
34461 struct elf_phdr *elf_ppnt, *elf_phdata;
34462 unsigned long elf_bss, elf_brk;
34463 int retval, i;
34464 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34465 unsigned long start_code, end_code, start_data, end_data;
34466 unsigned long reloc_func_desc __maybe_unused = 0;
34467 int executable_stack = EXSTACK_DEFAULT;
34468 - unsigned long def_flags = 0;
34469 struct {
34470 struct elfhdr elf_ex;
34471 struct elfhdr interp_elf_ex;
34472 } *loc;
34473 + unsigned long pax_task_size = TASK_SIZE;
34474
34475 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34476 if (!loc) {
34477 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34478
34479 /* OK, This is the point of no return */
34480 current->flags &= ~PF_FORKNOEXEC;
34481 - current->mm->def_flags = def_flags;
34482 +
34483 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34484 + current->mm->pax_flags = 0UL;
34485 +#endif
34486 +
34487 +#ifdef CONFIG_PAX_DLRESOLVE
34488 + current->mm->call_dl_resolve = 0UL;
34489 +#endif
34490 +
34491 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34492 + current->mm->call_syscall = 0UL;
34493 +#endif
34494 +
34495 +#ifdef CONFIG_PAX_ASLR
34496 + current->mm->delta_mmap = 0UL;
34497 + current->mm->delta_stack = 0UL;
34498 +#endif
34499 +
34500 + current->mm->def_flags = 0;
34501 +
34502 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34503 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34504 + send_sig(SIGKILL, current, 0);
34505 + goto out_free_dentry;
34506 + }
34507 +#endif
34508 +
34509 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34510 + pax_set_initial_flags(bprm);
34511 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34512 + if (pax_set_initial_flags_func)
34513 + (pax_set_initial_flags_func)(bprm);
34514 +#endif
34515 +
34516 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34517 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34518 + current->mm->context.user_cs_limit = PAGE_SIZE;
34519 + current->mm->def_flags |= VM_PAGEEXEC;
34520 + }
34521 +#endif
34522 +
34523 +#ifdef CONFIG_PAX_SEGMEXEC
34524 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34525 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34526 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34527 + pax_task_size = SEGMEXEC_TASK_SIZE;
34528 + current->mm->def_flags |= VM_NOHUGEPAGE;
34529 + }
34530 +#endif
34531 +
34532 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34533 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34534 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34535 + put_cpu();
34536 + }
34537 +#endif
34538
34539 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34540 may depend on the personality. */
34541 SET_PERSONALITY(loc->elf_ex);
34542 +
34543 +#ifdef CONFIG_PAX_ASLR
34544 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34545 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34546 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34547 + }
34548 +#endif
34549 +
34550 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34551 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34552 + executable_stack = EXSTACK_DISABLE_X;
34553 + current->personality &= ~READ_IMPLIES_EXEC;
34554 + } else
34555 +#endif
34556 +
34557 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34558 current->personality |= READ_IMPLIES_EXEC;
34559
34560 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34561 #else
34562 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34563 #endif
34564 +
34565 +#ifdef CONFIG_PAX_RANDMMAP
34566 + /* PaX: randomize base address at the default exe base if requested */
34567 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34568 +#ifdef CONFIG_SPARC64
34569 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34570 +#else
34571 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34572 +#endif
34573 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34574 + elf_flags |= MAP_FIXED;
34575 + }
34576 +#endif
34577 +
34578 }
34579
34580 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34581 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34582 * allowed task size. Note that p_filesz must always be
34583 * <= p_memsz so it is only necessary to check p_memsz.
34584 */
34585 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34586 - elf_ppnt->p_memsz > TASK_SIZE ||
34587 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34588 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34589 + elf_ppnt->p_memsz > pax_task_size ||
34590 + pax_task_size - elf_ppnt->p_memsz < k) {
34591 /* set_brk can never work. Avoid overflows. */
34592 send_sig(SIGKILL, current, 0);
34593 retval = -EINVAL;
34594 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34595 start_data += load_bias;
34596 end_data += load_bias;
34597
34598 +#ifdef CONFIG_PAX_RANDMMAP
34599 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34600 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34601 +#endif
34602 +
34603 /* Calling set_brk effectively mmaps the pages that we need
34604 * for the bss and break sections. We must do this before
34605 * mapping in the interpreter, to make sure it doesn't wind
34606 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34607 goto out_free_dentry;
34608 }
34609 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34610 - send_sig(SIGSEGV, current, 0);
34611 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34612 - goto out_free_dentry;
34613 + /*
34614 + * This bss-zeroing can fail if the ELF
34615 + * file specifies odd protections. So
34616 + * we don't check the return value
34617 + */
34618 }
34619
34620 if (elf_interpreter) {
34621 @@ -1090,7 +1398,7 @@ out:
34622 * Decide what to dump of a segment, part, all or none.
34623 */
34624 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34625 - unsigned long mm_flags)
34626 + unsigned long mm_flags, long signr)
34627 {
34628 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34629
34630 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34631 if (vma->vm_file == NULL)
34632 return 0;
34633
34634 - if (FILTER(MAPPED_PRIVATE))
34635 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34636 goto whole;
34637
34638 /*
34639 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34640 {
34641 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34642 int i = 0;
34643 - do
34644 + do {
34645 i += 2;
34646 - while (auxv[i - 2] != AT_NULL);
34647 + } while (auxv[i - 2] != AT_NULL);
34648 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34649 }
34650
34651 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34652 }
34653
34654 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34655 - unsigned long mm_flags)
34656 + struct coredump_params *cprm)
34657 {
34658 struct vm_area_struct *vma;
34659 size_t size = 0;
34660
34661 for (vma = first_vma(current, gate_vma); vma != NULL;
34662 vma = next_vma(vma, gate_vma))
34663 - size += vma_dump_size(vma, mm_flags);
34664 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34665 return size;
34666 }
34667
34668 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34669
34670 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34671
34672 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34673 + offset += elf_core_vma_data_size(gate_vma, cprm);
34674 offset += elf_core_extra_data_size();
34675 e_shoff = offset;
34676
34677 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34678 offset = dataoff;
34679
34680 size += sizeof(*elf);
34681 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34682 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34683 goto end_coredump;
34684
34685 size += sizeof(*phdr4note);
34686 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34687 if (size > cprm->limit
34688 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34689 goto end_coredump;
34690 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34691 phdr.p_offset = offset;
34692 phdr.p_vaddr = vma->vm_start;
34693 phdr.p_paddr = 0;
34694 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34695 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34696 phdr.p_memsz = vma->vm_end - vma->vm_start;
34697 offset += phdr.p_filesz;
34698 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34699 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34700 phdr.p_align = ELF_EXEC_PAGESIZE;
34701
34702 size += sizeof(phdr);
34703 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34704 if (size > cprm->limit
34705 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34706 goto end_coredump;
34707 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34708 unsigned long addr;
34709 unsigned long end;
34710
34711 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34712 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34713
34714 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34715 struct page *page;
34716 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34717 page = get_dump_page(addr);
34718 if (page) {
34719 void *kaddr = kmap(page);
34720 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34721 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34722 !dump_write(cprm->file, kaddr,
34723 PAGE_SIZE);
34724 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34725
34726 if (e_phnum == PN_XNUM) {
34727 size += sizeof(*shdr4extnum);
34728 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34729 if (size > cprm->limit
34730 || !dump_write(cprm->file, shdr4extnum,
34731 sizeof(*shdr4extnum)))
34732 @@ -2067,6 +2380,97 @@ out:
34733
34734 #endif /* CONFIG_ELF_CORE */
34735
34736 +#ifdef CONFIG_PAX_MPROTECT
34737 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34738 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34739 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34740 + *
34741 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34742 + * basis because we want to allow the common case and not the special ones.
34743 + */
34744 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34745 +{
34746 + struct elfhdr elf_h;
34747 + struct elf_phdr elf_p;
34748 + unsigned long i;
34749 + unsigned long oldflags;
34750 + bool is_textrel_rw, is_textrel_rx, is_relro;
34751 +
34752 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34753 + return;
34754 +
34755 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34756 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34757 +
34758 +#ifdef CONFIG_PAX_ELFRELOCS
34759 + /* possible TEXTREL */
34760 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34761 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34762 +#else
34763 + is_textrel_rw = false;
34764 + is_textrel_rx = false;
34765 +#endif
34766 +
34767 + /* possible RELRO */
34768 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34769 +
34770 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34771 + return;
34772 +
34773 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34774 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34775 +
34776 +#ifdef CONFIG_PAX_ETEXECRELOCS
34777 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34778 +#else
34779 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34780 +#endif
34781 +
34782 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34783 + !elf_check_arch(&elf_h) ||
34784 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34785 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34786 + return;
34787 +
34788 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34789 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34790 + return;
34791 + switch (elf_p.p_type) {
34792 + case PT_DYNAMIC:
34793 + if (!is_textrel_rw && !is_textrel_rx)
34794 + continue;
34795 + i = 0UL;
34796 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34797 + elf_dyn dyn;
34798 +
34799 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34800 + return;
34801 + if (dyn.d_tag == DT_NULL)
34802 + return;
34803 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34804 + gr_log_textrel(vma);
34805 + if (is_textrel_rw)
34806 + vma->vm_flags |= VM_MAYWRITE;
34807 + else
34808 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34809 + vma->vm_flags &= ~VM_MAYWRITE;
34810 + return;
34811 + }
34812 + i++;
34813 + }
34814 + return;
34815 +
34816 + case PT_GNU_RELRO:
34817 + if (!is_relro)
34818 + continue;
34819 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34820 + vma->vm_flags &= ~VM_MAYWRITE;
34821 + return;
34822 + }
34823 + }
34824 +}
34825 +#endif
34826 +
34827 static int __init init_elf_binfmt(void)
34828 {
34829 return register_binfmt(&elf_format);
34830 diff -urNp linux-2.6.39.4/fs/binfmt_flat.c linux-2.6.39.4/fs/binfmt_flat.c
34831 --- linux-2.6.39.4/fs/binfmt_flat.c 2011-05-19 00:06:34.000000000 -0400
34832 +++ linux-2.6.39.4/fs/binfmt_flat.c 2011-08-05 19:44:37.000000000 -0400
34833 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34834 realdatastart = (unsigned long) -ENOMEM;
34835 printk("Unable to allocate RAM for process data, errno %d\n",
34836 (int)-realdatastart);
34837 + down_write(&current->mm->mmap_sem);
34838 do_munmap(current->mm, textpos, text_len);
34839 + up_write(&current->mm->mmap_sem);
34840 ret = realdatastart;
34841 goto err;
34842 }
34843 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34844 }
34845 if (IS_ERR_VALUE(result)) {
34846 printk("Unable to read data+bss, errno %d\n", (int)-result);
34847 + down_write(&current->mm->mmap_sem);
34848 do_munmap(current->mm, textpos, text_len);
34849 do_munmap(current->mm, realdatastart, len);
34850 + up_write(&current->mm->mmap_sem);
34851 ret = result;
34852 goto err;
34853 }
34854 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34855 }
34856 if (IS_ERR_VALUE(result)) {
34857 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34858 + down_write(&current->mm->mmap_sem);
34859 do_munmap(current->mm, textpos, text_len + data_len + extra +
34860 MAX_SHARED_LIBS * sizeof(unsigned long));
34861 + up_write(&current->mm->mmap_sem);
34862 ret = result;
34863 goto err;
34864 }
34865 diff -urNp linux-2.6.39.4/fs/bio.c linux-2.6.39.4/fs/bio.c
34866 --- linux-2.6.39.4/fs/bio.c 2011-05-19 00:06:34.000000000 -0400
34867 +++ linux-2.6.39.4/fs/bio.c 2011-08-05 19:44:37.000000000 -0400
34868 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34869 const int read = bio_data_dir(bio) == READ;
34870 struct bio_map_data *bmd = bio->bi_private;
34871 int i;
34872 - char *p = bmd->sgvecs[0].iov_base;
34873 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34874
34875 __bio_for_each_segment(bvec, bio, i, 0) {
34876 char *addr = page_address(bvec->bv_page);
34877 diff -urNp linux-2.6.39.4/fs/block_dev.c linux-2.6.39.4/fs/block_dev.c
34878 --- linux-2.6.39.4/fs/block_dev.c 2011-07-09 09:18:51.000000000 -0400
34879 +++ linux-2.6.39.4/fs/block_dev.c 2011-08-05 19:44:37.000000000 -0400
34880 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34881 else if (bdev->bd_contains == bdev)
34882 return true; /* is a whole device which isn't held */
34883
34884 - else if (whole->bd_holder == bd_may_claim)
34885 + else if (whole->bd_holder == (void *)bd_may_claim)
34886 return true; /* is a partition of a device that is being partitioned */
34887 else if (whole->bd_holder != NULL)
34888 return false; /* is a partition of a held device */
34889 diff -urNp linux-2.6.39.4/fs/btrfs/ctree.c linux-2.6.39.4/fs/btrfs/ctree.c
34890 --- linux-2.6.39.4/fs/btrfs/ctree.c 2011-05-19 00:06:34.000000000 -0400
34891 +++ linux-2.6.39.4/fs/btrfs/ctree.c 2011-08-05 19:44:37.000000000 -0400
34892 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
34893 free_extent_buffer(buf);
34894 add_root_to_dirty_list(root);
34895 } else {
34896 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34897 - parent_start = parent->start;
34898 - else
34899 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34900 + if (parent)
34901 + parent_start = parent->start;
34902 + else
34903 + parent_start = 0;
34904 + } else
34905 parent_start = 0;
34906
34907 WARN_ON(trans->transid != btrfs_header_generation(parent));
34908 @@ -3647,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
34909
34910 ret = 0;
34911 if (slot == 0) {
34912 - struct btrfs_disk_key disk_key;
34913 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
34914 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
34915 }
34916 diff -urNp linux-2.6.39.4/fs/btrfs/free-space-cache.c linux-2.6.39.4/fs/btrfs/free-space-cache.c
34917 --- linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-05-19 00:06:34.000000000 -0400
34918 +++ linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-08-05 19:44:37.000000000 -0400
34919 @@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34920 while(1) {
34921 if (entry->bytes < bytes ||
34922 (!entry->bitmap && entry->offset < min_start)) {
34923 - struct rb_node *node;
34924 -
34925 node = rb_next(&entry->offset_index);
34926 if (!node)
34927 break;
34928 @@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34929 cluster, entry, bytes,
34930 min_start);
34931 if (ret == 0) {
34932 - struct rb_node *node;
34933 node = rb_next(&entry->offset_index);
34934 if (!node)
34935 break;
34936 diff -urNp linux-2.6.39.4/fs/btrfs/inode.c linux-2.6.39.4/fs/btrfs/inode.c
34937 --- linux-2.6.39.4/fs/btrfs/inode.c 2011-05-19 00:06:34.000000000 -0400
34938 +++ linux-2.6.39.4/fs/btrfs/inode.c 2011-08-05 20:34:06.000000000 -0400
34939 @@ -6947,7 +6947,7 @@ fail:
34940 return -ENOMEM;
34941 }
34942
34943 -static int btrfs_getattr(struct vfsmount *mnt,
34944 +int btrfs_getattr(struct vfsmount *mnt,
34945 struct dentry *dentry, struct kstat *stat)
34946 {
34947 struct inode *inode = dentry->d_inode;
34948 @@ -6959,6 +6959,14 @@ static int btrfs_getattr(struct vfsmount
34949 return 0;
34950 }
34951
34952 +EXPORT_SYMBOL(btrfs_getattr);
34953 +
34954 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34955 +{
34956 + return BTRFS_I(inode)->root->anon_super.s_dev;
34957 +}
34958 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34959 +
34960 /*
34961 * If a file is moved, it will inherit the cow and compression flags of the new
34962 * directory.
34963 diff -urNp linux-2.6.39.4/fs/btrfs/ioctl.c linux-2.6.39.4/fs/btrfs/ioctl.c
34964 --- linux-2.6.39.4/fs/btrfs/ioctl.c 2011-05-19 00:06:34.000000000 -0400
34965 +++ linux-2.6.39.4/fs/btrfs/ioctl.c 2011-08-05 19:44:37.000000000 -0400
34966 @@ -2361,9 +2361,12 @@ long btrfs_ioctl_space_info(struct btrfs
34967 for (i = 0; i < num_types; i++) {
34968 struct btrfs_space_info *tmp;
34969
34970 + /* Don't copy in more than we allocated */
34971 if (!slot_count)
34972 break;
34973
34974 + slot_count--;
34975 +
34976 info = NULL;
34977 rcu_read_lock();
34978 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34979 @@ -2385,10 +2388,7 @@ long btrfs_ioctl_space_info(struct btrfs
34980 memcpy(dest, &space, sizeof(space));
34981 dest++;
34982 space_args.total_spaces++;
34983 - slot_count--;
34984 }
34985 - if (!slot_count)
34986 - break;
34987 }
34988 up_read(&info->groups_sem);
34989 }
34990 diff -urNp linux-2.6.39.4/fs/btrfs/relocation.c linux-2.6.39.4/fs/btrfs/relocation.c
34991 --- linux-2.6.39.4/fs/btrfs/relocation.c 2011-05-19 00:06:34.000000000 -0400
34992 +++ linux-2.6.39.4/fs/btrfs/relocation.c 2011-08-05 19:44:37.000000000 -0400
34993 @@ -1239,7 +1239,7 @@ static int __update_reloc_root(struct bt
34994 }
34995 spin_unlock(&rc->reloc_root_tree.lock);
34996
34997 - BUG_ON((struct btrfs_root *)node->data != root);
34998 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34999
35000 if (!del) {
35001 spin_lock(&rc->reloc_root_tree.lock);
35002 diff -urNp linux-2.6.39.4/fs/cachefiles/bind.c linux-2.6.39.4/fs/cachefiles/bind.c
35003 --- linux-2.6.39.4/fs/cachefiles/bind.c 2011-05-19 00:06:34.000000000 -0400
35004 +++ linux-2.6.39.4/fs/cachefiles/bind.c 2011-08-05 19:44:37.000000000 -0400
35005 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
35006 args);
35007
35008 /* start by checking things over */
35009 - ASSERT(cache->fstop_percent >= 0 &&
35010 - cache->fstop_percent < cache->fcull_percent &&
35011 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
35012 cache->fcull_percent < cache->frun_percent &&
35013 cache->frun_percent < 100);
35014
35015 - ASSERT(cache->bstop_percent >= 0 &&
35016 - cache->bstop_percent < cache->bcull_percent &&
35017 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
35018 cache->bcull_percent < cache->brun_percent &&
35019 cache->brun_percent < 100);
35020
35021 diff -urNp linux-2.6.39.4/fs/cachefiles/daemon.c linux-2.6.39.4/fs/cachefiles/daemon.c
35022 --- linux-2.6.39.4/fs/cachefiles/daemon.c 2011-05-19 00:06:34.000000000 -0400
35023 +++ linux-2.6.39.4/fs/cachefiles/daemon.c 2011-08-05 19:44:37.000000000 -0400
35024 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
35025 if (n > buflen)
35026 return -EMSGSIZE;
35027
35028 - if (copy_to_user(_buffer, buffer, n) != 0)
35029 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
35030 return -EFAULT;
35031
35032 return n;
35033 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
35034 if (test_bit(CACHEFILES_DEAD, &cache->flags))
35035 return -EIO;
35036
35037 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
35038 + if (datalen > PAGE_SIZE - 1)
35039 return -EOPNOTSUPP;
35040
35041 /* drag the command string into the kernel so we can parse it */
35042 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
35043 if (args[0] != '%' || args[1] != '\0')
35044 return -EINVAL;
35045
35046 - if (fstop < 0 || fstop >= cache->fcull_percent)
35047 + if (fstop >= cache->fcull_percent)
35048 return cachefiles_daemon_range_error(cache, args);
35049
35050 cache->fstop_percent = fstop;
35051 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
35052 if (args[0] != '%' || args[1] != '\0')
35053 return -EINVAL;
35054
35055 - if (bstop < 0 || bstop >= cache->bcull_percent)
35056 + if (bstop >= cache->bcull_percent)
35057 return cachefiles_daemon_range_error(cache, args);
35058
35059 cache->bstop_percent = bstop;
35060 diff -urNp linux-2.6.39.4/fs/cachefiles/internal.h linux-2.6.39.4/fs/cachefiles/internal.h
35061 --- linux-2.6.39.4/fs/cachefiles/internal.h 2011-05-19 00:06:34.000000000 -0400
35062 +++ linux-2.6.39.4/fs/cachefiles/internal.h 2011-08-05 19:44:37.000000000 -0400
35063 @@ -57,7 +57,7 @@ struct cachefiles_cache {
35064 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
35065 struct rb_root active_nodes; /* active nodes (can't be culled) */
35066 rwlock_t active_lock; /* lock for active_nodes */
35067 - atomic_t gravecounter; /* graveyard uniquifier */
35068 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
35069 unsigned frun_percent; /* when to stop culling (% files) */
35070 unsigned fcull_percent; /* when to start culling (% files) */
35071 unsigned fstop_percent; /* when to stop allocating (% files) */
35072 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
35073 * proc.c
35074 */
35075 #ifdef CONFIG_CACHEFILES_HISTOGRAM
35076 -extern atomic_t cachefiles_lookup_histogram[HZ];
35077 -extern atomic_t cachefiles_mkdir_histogram[HZ];
35078 -extern atomic_t cachefiles_create_histogram[HZ];
35079 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35080 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35081 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
35082
35083 extern int __init cachefiles_proc_init(void);
35084 extern void cachefiles_proc_cleanup(void);
35085 static inline
35086 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
35087 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
35088 {
35089 unsigned long jif = jiffies - start_jif;
35090 if (jif >= HZ)
35091 jif = HZ - 1;
35092 - atomic_inc(&histogram[jif]);
35093 + atomic_inc_unchecked(&histogram[jif]);
35094 }
35095
35096 #else
35097 diff -urNp linux-2.6.39.4/fs/cachefiles/namei.c linux-2.6.39.4/fs/cachefiles/namei.c
35098 --- linux-2.6.39.4/fs/cachefiles/namei.c 2011-05-19 00:06:34.000000000 -0400
35099 +++ linux-2.6.39.4/fs/cachefiles/namei.c 2011-08-05 19:44:37.000000000 -0400
35100 @@ -318,7 +318,7 @@ try_again:
35101 /* first step is to make up a grave dentry in the graveyard */
35102 sprintf(nbuffer, "%08x%08x",
35103 (uint32_t) get_seconds(),
35104 - (uint32_t) atomic_inc_return(&cache->gravecounter));
35105 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
35106
35107 /* do the multiway lock magic */
35108 trap = lock_rename(cache->graveyard, dir);
35109 diff -urNp linux-2.6.39.4/fs/cachefiles/proc.c linux-2.6.39.4/fs/cachefiles/proc.c
35110 --- linux-2.6.39.4/fs/cachefiles/proc.c 2011-05-19 00:06:34.000000000 -0400
35111 +++ linux-2.6.39.4/fs/cachefiles/proc.c 2011-08-05 19:44:37.000000000 -0400
35112 @@ -14,9 +14,9 @@
35113 #include <linux/seq_file.h>
35114 #include "internal.h"
35115
35116 -atomic_t cachefiles_lookup_histogram[HZ];
35117 -atomic_t cachefiles_mkdir_histogram[HZ];
35118 -atomic_t cachefiles_create_histogram[HZ];
35119 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35120 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35121 +atomic_unchecked_t cachefiles_create_histogram[HZ];
35122
35123 /*
35124 * display the latency histogram
35125 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
35126 return 0;
35127 default:
35128 index = (unsigned long) v - 3;
35129 - x = atomic_read(&cachefiles_lookup_histogram[index]);
35130 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
35131 - z = atomic_read(&cachefiles_create_histogram[index]);
35132 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
35133 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
35134 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
35135 if (x == 0 && y == 0 && z == 0)
35136 return 0;
35137
35138 diff -urNp linux-2.6.39.4/fs/cachefiles/rdwr.c linux-2.6.39.4/fs/cachefiles/rdwr.c
35139 --- linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-05-19 00:06:34.000000000 -0400
35140 +++ linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-08-05 19:44:37.000000000 -0400
35141 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
35142 old_fs = get_fs();
35143 set_fs(KERNEL_DS);
35144 ret = file->f_op->write(
35145 - file, (const void __user *) data, len, &pos);
35146 + file, (__force const void __user *) data, len, &pos);
35147 set_fs(old_fs);
35148 kunmap(page);
35149 if (ret != len)
35150 diff -urNp linux-2.6.39.4/fs/ceph/dir.c linux-2.6.39.4/fs/ceph/dir.c
35151 --- linux-2.6.39.4/fs/ceph/dir.c 2011-05-19 00:06:34.000000000 -0400
35152 +++ linux-2.6.39.4/fs/ceph/dir.c 2011-08-05 19:44:37.000000000 -0400
35153 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35154 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35155 struct ceph_mds_client *mdsc = fsc->mdsc;
35156 unsigned frag = fpos_frag(filp->f_pos);
35157 - int off = fpos_off(filp->f_pos);
35158 + unsigned int off = fpos_off(filp->f_pos);
35159 int err;
35160 u32 ftype;
35161 struct ceph_mds_reply_info_parsed *rinfo;
35162 @@ -360,7 +360,7 @@ more:
35163 rinfo = &fi->last_readdir->r_reply_info;
35164 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
35165 rinfo->dir_nr, off, fi->offset);
35166 - while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
35167 + while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
35168 u64 pos = ceph_make_fpos(frag, off);
35169 struct ceph_mds_reply_inode *in =
35170 rinfo->dir_in[off - fi->offset].in;
35171 diff -urNp linux-2.6.39.4/fs/cifs/cifs_debug.c linux-2.6.39.4/fs/cifs/cifs_debug.c
35172 --- linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-05-19 00:06:34.000000000 -0400
35173 +++ linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-08-05 19:44:37.000000000 -0400
35174 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35175 tcon = list_entry(tmp3,
35176 struct cifsTconInfo,
35177 tcon_list);
35178 - atomic_set(&tcon->num_smbs_sent, 0);
35179 - atomic_set(&tcon->num_writes, 0);
35180 - atomic_set(&tcon->num_reads, 0);
35181 - atomic_set(&tcon->num_oplock_brks, 0);
35182 - atomic_set(&tcon->num_opens, 0);
35183 - atomic_set(&tcon->num_posixopens, 0);
35184 - atomic_set(&tcon->num_posixmkdirs, 0);
35185 - atomic_set(&tcon->num_closes, 0);
35186 - atomic_set(&tcon->num_deletes, 0);
35187 - atomic_set(&tcon->num_mkdirs, 0);
35188 - atomic_set(&tcon->num_rmdirs, 0);
35189 - atomic_set(&tcon->num_renames, 0);
35190 - atomic_set(&tcon->num_t2renames, 0);
35191 - atomic_set(&tcon->num_ffirst, 0);
35192 - atomic_set(&tcon->num_fnext, 0);
35193 - atomic_set(&tcon->num_fclose, 0);
35194 - atomic_set(&tcon->num_hardlinks, 0);
35195 - atomic_set(&tcon->num_symlinks, 0);
35196 - atomic_set(&tcon->num_locks, 0);
35197 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35198 + atomic_set_unchecked(&tcon->num_writes, 0);
35199 + atomic_set_unchecked(&tcon->num_reads, 0);
35200 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35201 + atomic_set_unchecked(&tcon->num_opens, 0);
35202 + atomic_set_unchecked(&tcon->num_posixopens, 0);
35203 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35204 + atomic_set_unchecked(&tcon->num_closes, 0);
35205 + atomic_set_unchecked(&tcon->num_deletes, 0);
35206 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
35207 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
35208 + atomic_set_unchecked(&tcon->num_renames, 0);
35209 + atomic_set_unchecked(&tcon->num_t2renames, 0);
35210 + atomic_set_unchecked(&tcon->num_ffirst, 0);
35211 + atomic_set_unchecked(&tcon->num_fnext, 0);
35212 + atomic_set_unchecked(&tcon->num_fclose, 0);
35213 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
35214 + atomic_set_unchecked(&tcon->num_symlinks, 0);
35215 + atomic_set_unchecked(&tcon->num_locks, 0);
35216 }
35217 }
35218 }
35219 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35220 if (tcon->need_reconnect)
35221 seq_puts(m, "\tDISCONNECTED ");
35222 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35223 - atomic_read(&tcon->num_smbs_sent),
35224 - atomic_read(&tcon->num_oplock_brks));
35225 + atomic_read_unchecked(&tcon->num_smbs_sent),
35226 + atomic_read_unchecked(&tcon->num_oplock_brks));
35227 seq_printf(m, "\nReads: %d Bytes: %lld",
35228 - atomic_read(&tcon->num_reads),
35229 + atomic_read_unchecked(&tcon->num_reads),
35230 (long long)(tcon->bytes_read));
35231 seq_printf(m, "\nWrites: %d Bytes: %lld",
35232 - atomic_read(&tcon->num_writes),
35233 + atomic_read_unchecked(&tcon->num_writes),
35234 (long long)(tcon->bytes_written));
35235 seq_printf(m, "\nFlushes: %d",
35236 - atomic_read(&tcon->num_flushes));
35237 + atomic_read_unchecked(&tcon->num_flushes));
35238 seq_printf(m, "\nLocks: %d HardLinks: %d "
35239 "Symlinks: %d",
35240 - atomic_read(&tcon->num_locks),
35241 - atomic_read(&tcon->num_hardlinks),
35242 - atomic_read(&tcon->num_symlinks));
35243 + atomic_read_unchecked(&tcon->num_locks),
35244 + atomic_read_unchecked(&tcon->num_hardlinks),
35245 + atomic_read_unchecked(&tcon->num_symlinks));
35246 seq_printf(m, "\nOpens: %d Closes: %d "
35247 "Deletes: %d",
35248 - atomic_read(&tcon->num_opens),
35249 - atomic_read(&tcon->num_closes),
35250 - atomic_read(&tcon->num_deletes));
35251 + atomic_read_unchecked(&tcon->num_opens),
35252 + atomic_read_unchecked(&tcon->num_closes),
35253 + atomic_read_unchecked(&tcon->num_deletes));
35254 seq_printf(m, "\nPosix Opens: %d "
35255 "Posix Mkdirs: %d",
35256 - atomic_read(&tcon->num_posixopens),
35257 - atomic_read(&tcon->num_posixmkdirs));
35258 + atomic_read_unchecked(&tcon->num_posixopens),
35259 + atomic_read_unchecked(&tcon->num_posixmkdirs));
35260 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35261 - atomic_read(&tcon->num_mkdirs),
35262 - atomic_read(&tcon->num_rmdirs));
35263 + atomic_read_unchecked(&tcon->num_mkdirs),
35264 + atomic_read_unchecked(&tcon->num_rmdirs));
35265 seq_printf(m, "\nRenames: %d T2 Renames %d",
35266 - atomic_read(&tcon->num_renames),
35267 - atomic_read(&tcon->num_t2renames));
35268 + atomic_read_unchecked(&tcon->num_renames),
35269 + atomic_read_unchecked(&tcon->num_t2renames));
35270 seq_printf(m, "\nFindFirst: %d FNext %d "
35271 "FClose %d",
35272 - atomic_read(&tcon->num_ffirst),
35273 - atomic_read(&tcon->num_fnext),
35274 - atomic_read(&tcon->num_fclose));
35275 + atomic_read_unchecked(&tcon->num_ffirst),
35276 + atomic_read_unchecked(&tcon->num_fnext),
35277 + atomic_read_unchecked(&tcon->num_fclose));
35278 }
35279 }
35280 }
35281 diff -urNp linux-2.6.39.4/fs/cifs/cifsglob.h linux-2.6.39.4/fs/cifs/cifsglob.h
35282 --- linux-2.6.39.4/fs/cifs/cifsglob.h 2011-05-19 00:06:34.000000000 -0400
35283 +++ linux-2.6.39.4/fs/cifs/cifsglob.h 2011-08-05 19:44:37.000000000 -0400
35284 @@ -305,28 +305,28 @@ struct cifsTconInfo {
35285 __u16 Flags; /* optional support bits */
35286 enum statusEnum tidStatus;
35287 #ifdef CONFIG_CIFS_STATS
35288 - atomic_t num_smbs_sent;
35289 - atomic_t num_writes;
35290 - atomic_t num_reads;
35291 - atomic_t num_flushes;
35292 - atomic_t num_oplock_brks;
35293 - atomic_t num_opens;
35294 - atomic_t num_closes;
35295 - atomic_t num_deletes;
35296 - atomic_t num_mkdirs;
35297 - atomic_t num_posixopens;
35298 - atomic_t num_posixmkdirs;
35299 - atomic_t num_rmdirs;
35300 - atomic_t num_renames;
35301 - atomic_t num_t2renames;
35302 - atomic_t num_ffirst;
35303 - atomic_t num_fnext;
35304 - atomic_t num_fclose;
35305 - atomic_t num_hardlinks;
35306 - atomic_t num_symlinks;
35307 - atomic_t num_locks;
35308 - atomic_t num_acl_get;
35309 - atomic_t num_acl_set;
35310 + atomic_unchecked_t num_smbs_sent;
35311 + atomic_unchecked_t num_writes;
35312 + atomic_unchecked_t num_reads;
35313 + atomic_unchecked_t num_flushes;
35314 + atomic_unchecked_t num_oplock_brks;
35315 + atomic_unchecked_t num_opens;
35316 + atomic_unchecked_t num_closes;
35317 + atomic_unchecked_t num_deletes;
35318 + atomic_unchecked_t num_mkdirs;
35319 + atomic_unchecked_t num_posixopens;
35320 + atomic_unchecked_t num_posixmkdirs;
35321 + atomic_unchecked_t num_rmdirs;
35322 + atomic_unchecked_t num_renames;
35323 + atomic_unchecked_t num_t2renames;
35324 + atomic_unchecked_t num_ffirst;
35325 + atomic_unchecked_t num_fnext;
35326 + atomic_unchecked_t num_fclose;
35327 + atomic_unchecked_t num_hardlinks;
35328 + atomic_unchecked_t num_symlinks;
35329 + atomic_unchecked_t num_locks;
35330 + atomic_unchecked_t num_acl_get;
35331 + atomic_unchecked_t num_acl_set;
35332 #ifdef CONFIG_CIFS_STATS2
35333 unsigned long long time_writes;
35334 unsigned long long time_reads;
35335 @@ -509,7 +509,7 @@ static inline char CIFS_DIR_SEP(const st
35336 }
35337
35338 #ifdef CONFIG_CIFS_STATS
35339 -#define cifs_stats_inc atomic_inc
35340 +#define cifs_stats_inc atomic_inc_unchecked
35341
35342 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
35343 unsigned int bytes)
35344 diff -urNp linux-2.6.39.4/fs/cifs/link.c linux-2.6.39.4/fs/cifs/link.c
35345 --- linux-2.6.39.4/fs/cifs/link.c 2011-05-19 00:06:34.000000000 -0400
35346 +++ linux-2.6.39.4/fs/cifs/link.c 2011-08-05 19:44:37.000000000 -0400
35347 @@ -577,7 +577,7 @@ symlink_exit:
35348
35349 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35350 {
35351 - char *p = nd_get_link(nd);
35352 + const char *p = nd_get_link(nd);
35353 if (!IS_ERR(p))
35354 kfree(p);
35355 }
35356 diff -urNp linux-2.6.39.4/fs/coda/cache.c linux-2.6.39.4/fs/coda/cache.c
35357 --- linux-2.6.39.4/fs/coda/cache.c 2011-05-19 00:06:34.000000000 -0400
35358 +++ linux-2.6.39.4/fs/coda/cache.c 2011-08-05 19:44:37.000000000 -0400
35359 @@ -24,7 +24,7 @@
35360 #include "coda_linux.h"
35361 #include "coda_cache.h"
35362
35363 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35364 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35365
35366 /* replace or extend an acl cache hit */
35367 void coda_cache_enter(struct inode *inode, int mask)
35368 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35369 struct coda_inode_info *cii = ITOC(inode);
35370
35371 spin_lock(&cii->c_lock);
35372 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35373 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35374 if (cii->c_uid != current_fsuid()) {
35375 cii->c_uid = current_fsuid();
35376 cii->c_cached_perm = mask;
35377 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35378 {
35379 struct coda_inode_info *cii = ITOC(inode);
35380 spin_lock(&cii->c_lock);
35381 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35382 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35383 spin_unlock(&cii->c_lock);
35384 }
35385
35386 /* remove all acl caches */
35387 void coda_cache_clear_all(struct super_block *sb)
35388 {
35389 - atomic_inc(&permission_epoch);
35390 + atomic_inc_unchecked(&permission_epoch);
35391 }
35392
35393
35394 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35395 spin_lock(&cii->c_lock);
35396 hit = (mask & cii->c_cached_perm) == mask &&
35397 cii->c_uid == current_fsuid() &&
35398 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35399 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35400 spin_unlock(&cii->c_lock);
35401
35402 return hit;
35403 diff -urNp linux-2.6.39.4/fs/compat_binfmt_elf.c linux-2.6.39.4/fs/compat_binfmt_elf.c
35404 --- linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
35405 +++ linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
35406 @@ -30,11 +30,13 @@
35407 #undef elf_phdr
35408 #undef elf_shdr
35409 #undef elf_note
35410 +#undef elf_dyn
35411 #undef elf_addr_t
35412 #define elfhdr elf32_hdr
35413 #define elf_phdr elf32_phdr
35414 #define elf_shdr elf32_shdr
35415 #define elf_note elf32_note
35416 +#define elf_dyn Elf32_Dyn
35417 #define elf_addr_t Elf32_Addr
35418
35419 /*
35420 diff -urNp linux-2.6.39.4/fs/compat.c linux-2.6.39.4/fs/compat.c
35421 --- linux-2.6.39.4/fs/compat.c 2011-05-19 00:06:34.000000000 -0400
35422 +++ linux-2.6.39.4/fs/compat.c 2011-08-05 19:44:37.000000000 -0400
35423 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35424 goto out;
35425
35426 ret = -EINVAL;
35427 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35428 + if (nr_segs > UIO_MAXIOV)
35429 goto out;
35430 if (nr_segs > fast_segs) {
35431 ret = -ENOMEM;
35432 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35433
35434 struct compat_readdir_callback {
35435 struct compat_old_linux_dirent __user *dirent;
35436 + struct file * file;
35437 int result;
35438 };
35439
35440 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35441 buf->result = -EOVERFLOW;
35442 return -EOVERFLOW;
35443 }
35444 +
35445 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35446 + return 0;
35447 +
35448 buf->result++;
35449 dirent = buf->dirent;
35450 if (!access_ok(VERIFY_WRITE, dirent,
35451 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35452
35453 buf.result = 0;
35454 buf.dirent = dirent;
35455 + buf.file = file;
35456
35457 error = vfs_readdir(file, compat_fillonedir, &buf);
35458 if (buf.result)
35459 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35460 struct compat_getdents_callback {
35461 struct compat_linux_dirent __user *current_dir;
35462 struct compat_linux_dirent __user *previous;
35463 + struct file * file;
35464 int count;
35465 int error;
35466 };
35467 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35468 buf->error = -EOVERFLOW;
35469 return -EOVERFLOW;
35470 }
35471 +
35472 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35473 + return 0;
35474 +
35475 dirent = buf->previous;
35476 if (dirent) {
35477 if (__put_user(offset, &dirent->d_off))
35478 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35479 buf.previous = NULL;
35480 buf.count = count;
35481 buf.error = 0;
35482 + buf.file = file;
35483
35484 error = vfs_readdir(file, compat_filldir, &buf);
35485 if (error >= 0)
35486 @@ -1006,6 +1018,7 @@ out:
35487 struct compat_getdents_callback64 {
35488 struct linux_dirent64 __user *current_dir;
35489 struct linux_dirent64 __user *previous;
35490 + struct file * file;
35491 int count;
35492 int error;
35493 };
35494 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35495 buf->error = -EINVAL; /* only used if we fail.. */
35496 if (reclen > buf->count)
35497 return -EINVAL;
35498 +
35499 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35500 + return 0;
35501 +
35502 dirent = buf->previous;
35503
35504 if (dirent) {
35505 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35506 buf.previous = NULL;
35507 buf.count = count;
35508 buf.error = 0;
35509 + buf.file = file;
35510
35511 error = vfs_readdir(file, compat_filldir64, &buf);
35512 if (error >= 0)
35513 @@ -1436,6 +1454,11 @@ int compat_do_execve(char * filename,
35514 compat_uptr_t __user *envp,
35515 struct pt_regs * regs)
35516 {
35517 +#ifdef CONFIG_GRKERNSEC
35518 + struct file *old_exec_file;
35519 + struct acl_subject_label *old_acl;
35520 + struct rlimit old_rlim[RLIM_NLIMITS];
35521 +#endif
35522 struct linux_binprm *bprm;
35523 struct file *file;
35524 struct files_struct *displaced;
35525 @@ -1472,6 +1495,19 @@ int compat_do_execve(char * filename,
35526 bprm->filename = filename;
35527 bprm->interp = filename;
35528
35529 + if (gr_process_user_ban()) {
35530 + retval = -EPERM;
35531 + goto out_file;
35532 + }
35533 +
35534 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35535 + retval = -EAGAIN;
35536 + if (gr_handle_nproc())
35537 + goto out_file;
35538 + retval = -EACCES;
35539 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
35540 + goto out_file;
35541 +
35542 retval = bprm_mm_init(bprm);
35543 if (retval)
35544 goto out_file;
35545 @@ -1501,9 +1537,40 @@ int compat_do_execve(char * filename,
35546 if (retval < 0)
35547 goto out;
35548
35549 + if (!gr_tpe_allow(file)) {
35550 + retval = -EACCES;
35551 + goto out;
35552 + }
35553 +
35554 + if (gr_check_crash_exec(file)) {
35555 + retval = -EACCES;
35556 + goto out;
35557 + }
35558 +
35559 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35560 +
35561 + gr_handle_exec_args_compat(bprm, argv);
35562 +
35563 +#ifdef CONFIG_GRKERNSEC
35564 + old_acl = current->acl;
35565 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35566 + old_exec_file = current->exec_file;
35567 + get_file(file);
35568 + current->exec_file = file;
35569 +#endif
35570 +
35571 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35572 + bprm->unsafe & LSM_UNSAFE_SHARE);
35573 + if (retval < 0)
35574 + goto out_fail;
35575 +
35576 retval = search_binary_handler(bprm, regs);
35577 if (retval < 0)
35578 - goto out;
35579 + goto out_fail;
35580 +#ifdef CONFIG_GRKERNSEC
35581 + if (old_exec_file)
35582 + fput(old_exec_file);
35583 +#endif
35584
35585 /* execve succeeded */
35586 current->fs->in_exec = 0;
35587 @@ -1514,6 +1581,14 @@ int compat_do_execve(char * filename,
35588 put_files_struct(displaced);
35589 return retval;
35590
35591 +out_fail:
35592 +#ifdef CONFIG_GRKERNSEC
35593 + current->acl = old_acl;
35594 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35595 + fput(current->exec_file);
35596 + current->exec_file = old_exec_file;
35597 +#endif
35598 +
35599 out:
35600 if (bprm->mm) {
35601 acct_arg_size(bprm, 0);
35602 @@ -1681,6 +1756,8 @@ int compat_core_sys_select(int n, compat
35603 struct fdtable *fdt;
35604 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35605
35606 + pax_track_stack();
35607 +
35608 if (n < 0)
35609 goto out_nofds;
35610
35611 diff -urNp linux-2.6.39.4/fs/compat_ioctl.c linux-2.6.39.4/fs/compat_ioctl.c
35612 --- linux-2.6.39.4/fs/compat_ioctl.c 2011-05-19 00:06:34.000000000 -0400
35613 +++ linux-2.6.39.4/fs/compat_ioctl.c 2011-08-05 19:44:37.000000000 -0400
35614 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35615
35616 err = get_user(palp, &up->palette);
35617 err |= get_user(length, &up->length);
35618 + if (err)
35619 + return -EFAULT;
35620
35621 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35622 err = put_user(compat_ptr(palp), &up_native->palette);
35623 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35624 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35625 {
35626 unsigned int a, b;
35627 - a = *(unsigned int *)p;
35628 - b = *(unsigned int *)q;
35629 + a = *(const unsigned int *)p;
35630 + b = *(const unsigned int *)q;
35631 if (a > b)
35632 return 1;
35633 if (a < b)
35634 diff -urNp linux-2.6.39.4/fs/configfs/dir.c linux-2.6.39.4/fs/configfs/dir.c
35635 --- linux-2.6.39.4/fs/configfs/dir.c 2011-05-19 00:06:34.000000000 -0400
35636 +++ linux-2.6.39.4/fs/configfs/dir.c 2011-08-05 19:44:37.000000000 -0400
35637 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35638 }
35639 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35640 struct configfs_dirent *next;
35641 - const char * name;
35642 + const unsigned char * name;
35643 + char d_name[sizeof(next->s_dentry->d_iname)];
35644 int len;
35645 struct inode *inode = NULL;
35646
35647 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35648 continue;
35649
35650 name = configfs_get_name(next);
35651 - len = strlen(name);
35652 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35653 + len = next->s_dentry->d_name.len;
35654 + memcpy(d_name, name, len);
35655 + name = d_name;
35656 + } else
35657 + len = strlen(name);
35658
35659 /*
35660 * We'll have a dentry and an inode for
35661 diff -urNp linux-2.6.39.4/fs/dcache.c linux-2.6.39.4/fs/dcache.c
35662 --- linux-2.6.39.4/fs/dcache.c 2011-05-19 00:06:34.000000000 -0400
35663 +++ linux-2.6.39.4/fs/dcache.c 2011-08-05 19:44:37.000000000 -0400
35664 @@ -3069,7 +3069,7 @@ void __init vfs_caches_init(unsigned lon
35665 mempages -= reserve;
35666
35667 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35668 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35669 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35670
35671 dcache_init();
35672 inode_init();
35673 diff -urNp linux-2.6.39.4/fs/ecryptfs/inode.c linux-2.6.39.4/fs/ecryptfs/inode.c
35674 --- linux-2.6.39.4/fs/ecryptfs/inode.c 2011-06-03 00:04:14.000000000 -0400
35675 +++ linux-2.6.39.4/fs/ecryptfs/inode.c 2011-08-05 19:44:37.000000000 -0400
35676 @@ -623,7 +623,7 @@ static int ecryptfs_readlink_lower(struc
35677 old_fs = get_fs();
35678 set_fs(get_ds());
35679 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35680 - (char __user *)lower_buf,
35681 + (__force char __user *)lower_buf,
35682 lower_bufsiz);
35683 set_fs(old_fs);
35684 if (rc < 0)
35685 @@ -669,7 +669,7 @@ static void *ecryptfs_follow_link(struct
35686 }
35687 old_fs = get_fs();
35688 set_fs(get_ds());
35689 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35690 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35691 set_fs(old_fs);
35692 if (rc < 0) {
35693 kfree(buf);
35694 @@ -684,7 +684,7 @@ out:
35695 static void
35696 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35697 {
35698 - char *buf = nd_get_link(nd);
35699 + const char *buf = nd_get_link(nd);
35700 if (!IS_ERR(buf)) {
35701 /* Free the char* */
35702 kfree(buf);
35703 diff -urNp linux-2.6.39.4/fs/ecryptfs/miscdev.c linux-2.6.39.4/fs/ecryptfs/miscdev.c
35704 --- linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-05-19 00:06:34.000000000 -0400
35705 +++ linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-08-05 19:44:37.000000000 -0400
35706 @@ -328,7 +328,7 @@ check_list:
35707 goto out_unlock_msg_ctx;
35708 i = 5;
35709 if (msg_ctx->msg) {
35710 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35711 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35712 goto out_unlock_msg_ctx;
35713 i += packet_length_size;
35714 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35715 diff -urNp linux-2.6.39.4/fs/exec.c linux-2.6.39.4/fs/exec.c
35716 --- linux-2.6.39.4/fs/exec.c 2011-06-25 12:55:23.000000000 -0400
35717 +++ linux-2.6.39.4/fs/exec.c 2011-08-05 19:44:37.000000000 -0400
35718 @@ -55,12 +55,24 @@
35719 #include <linux/fs_struct.h>
35720 #include <linux/pipe_fs_i.h>
35721 #include <linux/oom.h>
35722 +#include <linux/random.h>
35723 +#include <linux/seq_file.h>
35724 +
35725 +#ifdef CONFIG_PAX_REFCOUNT
35726 +#include <linux/kallsyms.h>
35727 +#include <linux/kdebug.h>
35728 +#endif
35729
35730 #include <asm/uaccess.h>
35731 #include <asm/mmu_context.h>
35732 #include <asm/tlb.h>
35733 #include "internal.h"
35734
35735 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35736 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35737 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35738 +#endif
35739 +
35740 int core_uses_pid;
35741 char core_pattern[CORENAME_MAX_SIZE] = "core";
35742 unsigned int core_pipe_limit;
35743 @@ -70,7 +82,7 @@ struct core_name {
35744 char *corename;
35745 int used, size;
35746 };
35747 -static atomic_t call_count = ATOMIC_INIT(1);
35748 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35749
35750 /* The maximal length of core_pattern is also specified in sysctl.c */
35751
35752 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35753 char *tmp = getname(library);
35754 int error = PTR_ERR(tmp);
35755 static const struct open_flags uselib_flags = {
35756 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35757 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35758 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35759 .intent = LOOKUP_OPEN
35760 };
35761 @@ -190,18 +202,10 @@ struct page *get_arg_page(struct linux_b
35762 int write)
35763 {
35764 struct page *page;
35765 - int ret;
35766
35767 -#ifdef CONFIG_STACK_GROWSUP
35768 - if (write) {
35769 - ret = expand_stack_downwards(bprm->vma, pos);
35770 - if (ret < 0)
35771 - return NULL;
35772 - }
35773 -#endif
35774 - ret = get_user_pages(current, bprm->mm, pos,
35775 - 1, write, 1, &page, NULL);
35776 - if (ret <= 0)
35777 + if (0 > expand_stack_downwards(bprm->vma, pos))
35778 + return NULL;
35779 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35780 return NULL;
35781
35782 if (write) {
35783 @@ -276,6 +280,11 @@ static int __bprm_mm_init(struct linux_b
35784 vma->vm_end = STACK_TOP_MAX;
35785 vma->vm_start = vma->vm_end - PAGE_SIZE;
35786 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35787 +
35788 +#ifdef CONFIG_PAX_SEGMEXEC
35789 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35790 +#endif
35791 +
35792 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35793 INIT_LIST_HEAD(&vma->anon_vma_chain);
35794
35795 @@ -290,6 +299,12 @@ static int __bprm_mm_init(struct linux_b
35796 mm->stack_vm = mm->total_vm = 1;
35797 up_write(&mm->mmap_sem);
35798 bprm->p = vma->vm_end - sizeof(void *);
35799 +
35800 +#ifdef CONFIG_PAX_RANDUSTACK
35801 + if (randomize_va_space)
35802 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35803 +#endif
35804 +
35805 return 0;
35806 err:
35807 up_write(&mm->mmap_sem);
35808 @@ -525,7 +540,7 @@ int copy_strings_kernel(int argc, const
35809 int r;
35810 mm_segment_t oldfs = get_fs();
35811 set_fs(KERNEL_DS);
35812 - r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
35813 + r = copy_strings(argc, (__force const char __user *const __user *)argv, bprm);
35814 set_fs(oldfs);
35815 return r;
35816 }
35817 @@ -555,7 +570,8 @@ static int shift_arg_pages(struct vm_are
35818 unsigned long new_end = old_end - shift;
35819 struct mmu_gather *tlb;
35820
35821 - BUG_ON(new_start > new_end);
35822 + if (new_start >= new_end || new_start < mmap_min_addr)
35823 + return -ENOMEM;
35824
35825 /*
35826 * ensure there are no vmas between where we want to go
35827 @@ -564,6 +580,10 @@ static int shift_arg_pages(struct vm_are
35828 if (vma != find_vma(mm, new_start))
35829 return -EFAULT;
35830
35831 +#ifdef CONFIG_PAX_SEGMEXEC
35832 + BUG_ON(pax_find_mirror_vma(vma));
35833 +#endif
35834 +
35835 /*
35836 * cover the whole range: [new_start, old_end)
35837 */
35838 @@ -644,10 +664,6 @@ int setup_arg_pages(struct linux_binprm
35839 stack_top = arch_align_stack(stack_top);
35840 stack_top = PAGE_ALIGN(stack_top);
35841
35842 - if (unlikely(stack_top < mmap_min_addr) ||
35843 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35844 - return -ENOMEM;
35845 -
35846 stack_shift = vma->vm_end - stack_top;
35847
35848 bprm->p -= stack_shift;
35849 @@ -659,8 +675,28 @@ int setup_arg_pages(struct linux_binprm
35850 bprm->exec -= stack_shift;
35851
35852 down_write(&mm->mmap_sem);
35853 +
35854 + /* Move stack pages down in memory. */
35855 + if (stack_shift) {
35856 + ret = shift_arg_pages(vma, stack_shift);
35857 + if (ret)
35858 + goto out_unlock;
35859 + }
35860 +
35861 vm_flags = VM_STACK_FLAGS;
35862
35863 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35864 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35865 + vm_flags &= ~VM_EXEC;
35866 +
35867 +#ifdef CONFIG_PAX_MPROTECT
35868 + if (mm->pax_flags & MF_PAX_MPROTECT)
35869 + vm_flags &= ~VM_MAYEXEC;
35870 +#endif
35871 +
35872 + }
35873 +#endif
35874 +
35875 /*
35876 * Adjust stack execute permissions; explicitly enable for
35877 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35878 @@ -679,13 +715,6 @@ int setup_arg_pages(struct linux_binprm
35879 goto out_unlock;
35880 BUG_ON(prev != vma);
35881
35882 - /* Move stack pages down in memory. */
35883 - if (stack_shift) {
35884 - ret = shift_arg_pages(vma, stack_shift);
35885 - if (ret)
35886 - goto out_unlock;
35887 - }
35888 -
35889 /* mprotect_fixup is overkill to remove the temporary stack flags */
35890 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35891
35892 @@ -725,7 +754,7 @@ struct file *open_exec(const char *name)
35893 struct file *file;
35894 int err;
35895 static const struct open_flags open_exec_flags = {
35896 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35897 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35898 .acc_mode = MAY_EXEC | MAY_OPEN,
35899 .intent = LOOKUP_OPEN
35900 };
35901 @@ -766,7 +795,7 @@ int kernel_read(struct file *file, loff_
35902 old_fs = get_fs();
35903 set_fs(get_ds());
35904 /* The cast to a user pointer is valid due to the set_fs() */
35905 - result = vfs_read(file, (void __user *)addr, count, &pos);
35906 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35907 set_fs(old_fs);
35908 return result;
35909 }
35910 @@ -1189,7 +1218,7 @@ int check_unsafe_exec(struct linux_binpr
35911 }
35912 rcu_read_unlock();
35913
35914 - if (p->fs->users > n_fs) {
35915 + if (atomic_read(&p->fs->users) > n_fs) {
35916 bprm->unsafe |= LSM_UNSAFE_SHARE;
35917 } else {
35918 res = -EAGAIN;
35919 @@ -1381,6 +1410,11 @@ int do_execve(const char * filename,
35920 const char __user *const __user *envp,
35921 struct pt_regs * regs)
35922 {
35923 +#ifdef CONFIG_GRKERNSEC
35924 + struct file *old_exec_file;
35925 + struct acl_subject_label *old_acl;
35926 + struct rlimit old_rlim[RLIM_NLIMITS];
35927 +#endif
35928 struct linux_binprm *bprm;
35929 struct file *file;
35930 struct files_struct *displaced;
35931 @@ -1417,6 +1451,23 @@ int do_execve(const char * filename,
35932 bprm->filename = filename;
35933 bprm->interp = filename;
35934
35935 + if (gr_process_user_ban()) {
35936 + retval = -EPERM;
35937 + goto out_file;
35938 + }
35939 +
35940 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35941 +
35942 + if (gr_handle_nproc()) {
35943 + retval = -EAGAIN;
35944 + goto out_file;
35945 + }
35946 +
35947 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35948 + retval = -EACCES;
35949 + goto out_file;
35950 + }
35951 +
35952 retval = bprm_mm_init(bprm);
35953 if (retval)
35954 goto out_file;
35955 @@ -1446,9 +1497,40 @@ int do_execve(const char * filename,
35956 if (retval < 0)
35957 goto out;
35958
35959 + if (!gr_tpe_allow(file)) {
35960 + retval = -EACCES;
35961 + goto out;
35962 + }
35963 +
35964 + if (gr_check_crash_exec(file)) {
35965 + retval = -EACCES;
35966 + goto out;
35967 + }
35968 +
35969 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35970 +
35971 + gr_handle_exec_args(bprm, argv);
35972 +
35973 +#ifdef CONFIG_GRKERNSEC
35974 + old_acl = current->acl;
35975 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35976 + old_exec_file = current->exec_file;
35977 + get_file(file);
35978 + current->exec_file = file;
35979 +#endif
35980 +
35981 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35982 + bprm->unsafe & LSM_UNSAFE_SHARE);
35983 + if (retval < 0)
35984 + goto out_fail;
35985 +
35986 retval = search_binary_handler(bprm,regs);
35987 if (retval < 0)
35988 - goto out;
35989 + goto out_fail;
35990 +#ifdef CONFIG_GRKERNSEC
35991 + if (old_exec_file)
35992 + fput(old_exec_file);
35993 +#endif
35994
35995 /* execve succeeded */
35996 current->fs->in_exec = 0;
35997 @@ -1459,6 +1541,14 @@ int do_execve(const char * filename,
35998 put_files_struct(displaced);
35999 return retval;
36000
36001 +out_fail:
36002 +#ifdef CONFIG_GRKERNSEC
36003 + current->acl = old_acl;
36004 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
36005 + fput(current->exec_file);
36006 + current->exec_file = old_exec_file;
36007 +#endif
36008 +
36009 out:
36010 if (bprm->mm) {
36011 acct_arg_size(bprm, 0);
36012 @@ -1504,7 +1594,7 @@ static int expand_corename(struct core_n
36013 {
36014 char *old_corename = cn->corename;
36015
36016 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
36017 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
36018 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
36019
36020 if (!cn->corename) {
36021 @@ -1557,7 +1647,7 @@ static int format_corename(struct core_n
36022 int pid_in_pattern = 0;
36023 int err = 0;
36024
36025 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
36026 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
36027 cn->corename = kmalloc(cn->size, GFP_KERNEL);
36028 cn->used = 0;
36029
36030 @@ -1645,6 +1735,219 @@ out:
36031 return ispipe;
36032 }
36033
36034 +int pax_check_flags(unsigned long *flags)
36035 +{
36036 + int retval = 0;
36037 +
36038 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
36039 + if (*flags & MF_PAX_SEGMEXEC)
36040 + {
36041 + *flags &= ~MF_PAX_SEGMEXEC;
36042 + retval = -EINVAL;
36043 + }
36044 +#endif
36045 +
36046 + if ((*flags & MF_PAX_PAGEEXEC)
36047 +
36048 +#ifdef CONFIG_PAX_PAGEEXEC
36049 + && (*flags & MF_PAX_SEGMEXEC)
36050 +#endif
36051 +
36052 + )
36053 + {
36054 + *flags &= ~MF_PAX_PAGEEXEC;
36055 + retval = -EINVAL;
36056 + }
36057 +
36058 + if ((*flags & MF_PAX_MPROTECT)
36059 +
36060 +#ifdef CONFIG_PAX_MPROTECT
36061 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36062 +#endif
36063 +
36064 + )
36065 + {
36066 + *flags &= ~MF_PAX_MPROTECT;
36067 + retval = -EINVAL;
36068 + }
36069 +
36070 + if ((*flags & MF_PAX_EMUTRAMP)
36071 +
36072 +#ifdef CONFIG_PAX_EMUTRAMP
36073 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36074 +#endif
36075 +
36076 + )
36077 + {
36078 + *flags &= ~MF_PAX_EMUTRAMP;
36079 + retval = -EINVAL;
36080 + }
36081 +
36082 + return retval;
36083 +}
36084 +
36085 +EXPORT_SYMBOL(pax_check_flags);
36086 +
36087 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36088 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
36089 +{
36090 + struct task_struct *tsk = current;
36091 + struct mm_struct *mm = current->mm;
36092 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
36093 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
36094 + char *path_exec = NULL;
36095 + char *path_fault = NULL;
36096 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
36097 +
36098 + if (buffer_exec && buffer_fault) {
36099 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
36100 +
36101 + down_read(&mm->mmap_sem);
36102 + vma = mm->mmap;
36103 + while (vma && (!vma_exec || !vma_fault)) {
36104 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
36105 + vma_exec = vma;
36106 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
36107 + vma_fault = vma;
36108 + vma = vma->vm_next;
36109 + }
36110 + if (vma_exec) {
36111 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
36112 + if (IS_ERR(path_exec))
36113 + path_exec = "<path too long>";
36114 + else {
36115 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
36116 + if (path_exec) {
36117 + *path_exec = 0;
36118 + path_exec = buffer_exec;
36119 + } else
36120 + path_exec = "<path too long>";
36121 + }
36122 + }
36123 + if (vma_fault) {
36124 + start = vma_fault->vm_start;
36125 + end = vma_fault->vm_end;
36126 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36127 + if (vma_fault->vm_file) {
36128 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36129 + if (IS_ERR(path_fault))
36130 + path_fault = "<path too long>";
36131 + else {
36132 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36133 + if (path_fault) {
36134 + *path_fault = 0;
36135 + path_fault = buffer_fault;
36136 + } else
36137 + path_fault = "<path too long>";
36138 + }
36139 + } else
36140 + path_fault = "<anonymous mapping>";
36141 + }
36142 + up_read(&mm->mmap_sem);
36143 + }
36144 + if (tsk->signal->curr_ip)
36145 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36146 + else
36147 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36148 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36149 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36150 + task_uid(tsk), task_euid(tsk), pc, sp);
36151 + free_page((unsigned long)buffer_exec);
36152 + free_page((unsigned long)buffer_fault);
36153 + pax_report_insns(pc, sp);
36154 + do_coredump(SIGKILL, SIGKILL, regs);
36155 +}
36156 +#endif
36157 +
36158 +#ifdef CONFIG_PAX_REFCOUNT
36159 +void pax_report_refcount_overflow(struct pt_regs *regs)
36160 +{
36161 + if (current->signal->curr_ip)
36162 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36163 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36164 + else
36165 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36166 + current->comm, task_pid_nr(current), current_uid(), current_euid());
36167 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36168 + show_regs(regs);
36169 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36170 +}
36171 +#endif
36172 +
36173 +#ifdef CONFIG_PAX_USERCOPY
36174 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36175 +int object_is_on_stack(const void *obj, unsigned long len)
36176 +{
36177 + const void * const stack = task_stack_page(current);
36178 + const void * const stackend = stack + THREAD_SIZE;
36179 +
36180 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36181 + const void *frame = NULL;
36182 + const void *oldframe;
36183 +#endif
36184 +
36185 + if (obj + len < obj)
36186 + return -1;
36187 +
36188 + if (obj + len <= stack || stackend <= obj)
36189 + return 0;
36190 +
36191 + if (obj < stack || stackend < obj + len)
36192 + return -1;
36193 +
36194 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36195 + oldframe = __builtin_frame_address(1);
36196 + if (oldframe)
36197 + frame = __builtin_frame_address(2);
36198 + /*
36199 + low ----------------------------------------------> high
36200 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
36201 + ^----------------^
36202 + allow copies only within here
36203 + */
36204 + while (stack <= frame && frame < stackend) {
36205 + /* if obj + len extends past the last frame, this
36206 + check won't pass and the next frame will be 0,
36207 + causing us to bail out and correctly report
36208 + the copy as invalid
36209 + */
36210 + if (obj + len <= frame)
36211 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36212 + oldframe = frame;
36213 + frame = *(const void * const *)frame;
36214 + }
36215 + return -1;
36216 +#else
36217 + return 1;
36218 +#endif
36219 +}
36220 +
36221 +
36222 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36223 +{
36224 + if (current->signal->curr_ip)
36225 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36226 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36227 + else
36228 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36229 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36230 + dump_stack();
36231 + gr_handle_kernel_exploit();
36232 + do_group_exit(SIGKILL);
36233 +}
36234 +#endif
36235 +
36236 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36237 +void pax_track_stack(void)
36238 +{
36239 + unsigned long sp = (unsigned long)&sp;
36240 + if (sp < current_thread_info()->lowest_stack &&
36241 + sp > (unsigned long)task_stack_page(current))
36242 + current_thread_info()->lowest_stack = sp;
36243 +}
36244 +EXPORT_SYMBOL(pax_track_stack);
36245 +#endif
36246 +
36247 static int zap_process(struct task_struct *start, int exit_code)
36248 {
36249 struct task_struct *t;
36250 @@ -1855,17 +2158,17 @@ static void wait_for_dump_helpers(struct
36251 pipe = file->f_path.dentry->d_inode->i_pipe;
36252
36253 pipe_lock(pipe);
36254 - pipe->readers++;
36255 - pipe->writers--;
36256 + atomic_inc(&pipe->readers);
36257 + atomic_dec(&pipe->writers);
36258
36259 - while ((pipe->readers > 1) && (!signal_pending(current))) {
36260 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36261 wake_up_interruptible_sync(&pipe->wait);
36262 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36263 pipe_wait(pipe);
36264 }
36265
36266 - pipe->readers--;
36267 - pipe->writers++;
36268 + atomic_dec(&pipe->readers);
36269 + atomic_inc(&pipe->writers);
36270 pipe_unlock(pipe);
36271
36272 }
36273 @@ -1926,7 +2229,7 @@ void do_coredump(long signr, int exit_co
36274 int retval = 0;
36275 int flag = 0;
36276 int ispipe;
36277 - static atomic_t core_dump_count = ATOMIC_INIT(0);
36278 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36279 struct coredump_params cprm = {
36280 .signr = signr,
36281 .regs = regs,
36282 @@ -1941,6 +2244,9 @@ void do_coredump(long signr, int exit_co
36283
36284 audit_core_dumps(signr);
36285
36286 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36287 + gr_handle_brute_attach(current, cprm.mm_flags);
36288 +
36289 binfmt = mm->binfmt;
36290 if (!binfmt || !binfmt->core_dump)
36291 goto fail;
36292 @@ -1981,6 +2287,8 @@ void do_coredump(long signr, int exit_co
36293 goto fail_corename;
36294 }
36295
36296 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36297 +
36298 if (ispipe) {
36299 int dump_count;
36300 char **helper_argv;
36301 @@ -2008,7 +2316,7 @@ void do_coredump(long signr, int exit_co
36302 }
36303 cprm.limit = RLIM_INFINITY;
36304
36305 - dump_count = atomic_inc_return(&core_dump_count);
36306 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
36307 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36308 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36309 task_tgid_vnr(current), current->comm);
36310 @@ -2078,7 +2386,7 @@ close_fail:
36311 filp_close(cprm.file, NULL);
36312 fail_dropcount:
36313 if (ispipe)
36314 - atomic_dec(&core_dump_count);
36315 + atomic_dec_unchecked(&core_dump_count);
36316 fail_unlock:
36317 kfree(cn.corename);
36318 fail_corename:
36319 diff -urNp linux-2.6.39.4/fs/ext2/balloc.c linux-2.6.39.4/fs/ext2/balloc.c
36320 --- linux-2.6.39.4/fs/ext2/balloc.c 2011-05-19 00:06:34.000000000 -0400
36321 +++ linux-2.6.39.4/fs/ext2/balloc.c 2011-08-05 19:44:37.000000000 -0400
36322 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36323
36324 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36325 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36326 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36327 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36328 sbi->s_resuid != current_fsuid() &&
36329 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36330 return 0;
36331 diff -urNp linux-2.6.39.4/fs/ext3/balloc.c linux-2.6.39.4/fs/ext3/balloc.c
36332 --- linux-2.6.39.4/fs/ext3/balloc.c 2011-05-19 00:06:34.000000000 -0400
36333 +++ linux-2.6.39.4/fs/ext3/balloc.c 2011-08-05 19:44:37.000000000 -0400
36334 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36335
36336 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36337 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36338 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36339 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36340 sbi->s_resuid != current_fsuid() &&
36341 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36342 return 0;
36343 diff -urNp linux-2.6.39.4/fs/ext4/balloc.c linux-2.6.39.4/fs/ext4/balloc.c
36344 --- linux-2.6.39.4/fs/ext4/balloc.c 2011-05-19 00:06:34.000000000 -0400
36345 +++ linux-2.6.39.4/fs/ext4/balloc.c 2011-08-05 19:44:37.000000000 -0400
36346 @@ -522,7 +522,7 @@ static int ext4_has_free_blocks(struct e
36347 /* Hm, nope. Are (enough) root reserved blocks available? */
36348 if (sbi->s_resuid == current_fsuid() ||
36349 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36350 - capable(CAP_SYS_RESOURCE)) {
36351 + capable_nolog(CAP_SYS_RESOURCE)) {
36352 if (free_blocks >= (nblocks + dirty_blocks))
36353 return 1;
36354 }
36355 diff -urNp linux-2.6.39.4/fs/ext4/ext4.h linux-2.6.39.4/fs/ext4/ext4.h
36356 --- linux-2.6.39.4/fs/ext4/ext4.h 2011-06-03 00:04:14.000000000 -0400
36357 +++ linux-2.6.39.4/fs/ext4/ext4.h 2011-08-05 19:44:37.000000000 -0400
36358 @@ -1166,19 +1166,19 @@ struct ext4_sb_info {
36359 unsigned long s_mb_last_start;
36360
36361 /* stats for buddy allocator */
36362 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36363 - atomic_t s_bal_success; /* we found long enough chunks */
36364 - atomic_t s_bal_allocated; /* in blocks */
36365 - atomic_t s_bal_ex_scanned; /* total extents scanned */
36366 - atomic_t s_bal_goals; /* goal hits */
36367 - atomic_t s_bal_breaks; /* too long searches */
36368 - atomic_t s_bal_2orders; /* 2^order hits */
36369 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36370 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36371 + atomic_unchecked_t s_bal_allocated; /* in blocks */
36372 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36373 + atomic_unchecked_t s_bal_goals; /* goal hits */
36374 + atomic_unchecked_t s_bal_breaks; /* too long searches */
36375 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36376 spinlock_t s_bal_lock;
36377 unsigned long s_mb_buddies_generated;
36378 unsigned long long s_mb_generation_time;
36379 - atomic_t s_mb_lost_chunks;
36380 - atomic_t s_mb_preallocated;
36381 - atomic_t s_mb_discarded;
36382 + atomic_unchecked_t s_mb_lost_chunks;
36383 + atomic_unchecked_t s_mb_preallocated;
36384 + atomic_unchecked_t s_mb_discarded;
36385 atomic_t s_lock_busy;
36386
36387 /* locality groups */
36388 diff -urNp linux-2.6.39.4/fs/ext4/mballoc.c linux-2.6.39.4/fs/ext4/mballoc.c
36389 --- linux-2.6.39.4/fs/ext4/mballoc.c 2011-06-03 00:04:14.000000000 -0400
36390 +++ linux-2.6.39.4/fs/ext4/mballoc.c 2011-08-05 19:44:37.000000000 -0400
36391 @@ -1853,7 +1853,7 @@ void ext4_mb_simple_scan_group(struct ex
36392 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36393
36394 if (EXT4_SB(sb)->s_mb_stats)
36395 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36396 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36397
36398 break;
36399 }
36400 @@ -2147,7 +2147,7 @@ repeat:
36401 ac->ac_status = AC_STATUS_CONTINUE;
36402 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36403 cr = 3;
36404 - atomic_inc(&sbi->s_mb_lost_chunks);
36405 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36406 goto repeat;
36407 }
36408 }
36409 @@ -2190,6 +2190,8 @@ static int ext4_mb_seq_groups_show(struc
36410 ext4_grpblk_t counters[16];
36411 } sg;
36412
36413 + pax_track_stack();
36414 +
36415 group--;
36416 if (group == 0)
36417 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36418 @@ -2613,25 +2615,25 @@ int ext4_mb_release(struct super_block *
36419 if (sbi->s_mb_stats) {
36420 printk(KERN_INFO
36421 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36422 - atomic_read(&sbi->s_bal_allocated),
36423 - atomic_read(&sbi->s_bal_reqs),
36424 - atomic_read(&sbi->s_bal_success));
36425 + atomic_read_unchecked(&sbi->s_bal_allocated),
36426 + atomic_read_unchecked(&sbi->s_bal_reqs),
36427 + atomic_read_unchecked(&sbi->s_bal_success));
36428 printk(KERN_INFO
36429 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36430 "%u 2^N hits, %u breaks, %u lost\n",
36431 - atomic_read(&sbi->s_bal_ex_scanned),
36432 - atomic_read(&sbi->s_bal_goals),
36433 - atomic_read(&sbi->s_bal_2orders),
36434 - atomic_read(&sbi->s_bal_breaks),
36435 - atomic_read(&sbi->s_mb_lost_chunks));
36436 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36437 + atomic_read_unchecked(&sbi->s_bal_goals),
36438 + atomic_read_unchecked(&sbi->s_bal_2orders),
36439 + atomic_read_unchecked(&sbi->s_bal_breaks),
36440 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36441 printk(KERN_INFO
36442 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36443 sbi->s_mb_buddies_generated++,
36444 sbi->s_mb_generation_time);
36445 printk(KERN_INFO
36446 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36447 - atomic_read(&sbi->s_mb_preallocated),
36448 - atomic_read(&sbi->s_mb_discarded));
36449 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36450 + atomic_read_unchecked(&sbi->s_mb_discarded));
36451 }
36452
36453 free_percpu(sbi->s_locality_groups);
36454 @@ -3107,16 +3109,16 @@ static void ext4_mb_collect_stats(struct
36455 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36456
36457 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36458 - atomic_inc(&sbi->s_bal_reqs);
36459 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36460 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36461 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36462 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36463 - atomic_inc(&sbi->s_bal_success);
36464 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36465 + atomic_inc_unchecked(&sbi->s_bal_success);
36466 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36467 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36468 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36469 - atomic_inc(&sbi->s_bal_goals);
36470 + atomic_inc_unchecked(&sbi->s_bal_goals);
36471 if (ac->ac_found > sbi->s_mb_max_to_scan)
36472 - atomic_inc(&sbi->s_bal_breaks);
36473 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36474 }
36475
36476 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36477 @@ -3514,7 +3516,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36478 trace_ext4_mb_new_inode_pa(ac, pa);
36479
36480 ext4_mb_use_inode_pa(ac, pa);
36481 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36482 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36483
36484 ei = EXT4_I(ac->ac_inode);
36485 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36486 @@ -3574,7 +3576,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36487 trace_ext4_mb_new_group_pa(ac, pa);
36488
36489 ext4_mb_use_group_pa(ac, pa);
36490 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36491 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36492
36493 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36494 lg = ac->ac_lg;
36495 @@ -3661,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36496 * from the bitmap and continue.
36497 */
36498 }
36499 - atomic_add(free, &sbi->s_mb_discarded);
36500 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36501
36502 return err;
36503 }
36504 @@ -3679,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36505 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36506 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36507 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36508 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36509 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36510 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36511
36512 return 0;
36513 diff -urNp linux-2.6.39.4/fs/fcntl.c linux-2.6.39.4/fs/fcntl.c
36514 --- linux-2.6.39.4/fs/fcntl.c 2011-05-19 00:06:34.000000000 -0400
36515 +++ linux-2.6.39.4/fs/fcntl.c 2011-08-05 19:44:37.000000000 -0400
36516 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36517 if (err)
36518 return err;
36519
36520 + if (gr_handle_chroot_fowner(pid, type))
36521 + return -ENOENT;
36522 + if (gr_check_protected_task_fowner(pid, type))
36523 + return -EACCES;
36524 +
36525 f_modown(filp, pid, type, force);
36526 return 0;
36527 }
36528 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36529 switch (cmd) {
36530 case F_DUPFD:
36531 case F_DUPFD_CLOEXEC:
36532 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36533 if (arg >= rlimit(RLIMIT_NOFILE))
36534 break;
36535 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36536 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36537 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36538 * is defined as O_NONBLOCK on some platforms and not on others.
36539 */
36540 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36541 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36542 O_RDONLY | O_WRONLY | O_RDWR |
36543 O_CREAT | O_EXCL | O_NOCTTY |
36544 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36545 __O_SYNC | O_DSYNC | FASYNC |
36546 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36547 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36548 - __FMODE_EXEC | O_PATH
36549 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36550 ));
36551
36552 fasync_cache = kmem_cache_create("fasync_cache",
36553 diff -urNp linux-2.6.39.4/fs/fifo.c linux-2.6.39.4/fs/fifo.c
36554 --- linux-2.6.39.4/fs/fifo.c 2011-05-19 00:06:34.000000000 -0400
36555 +++ linux-2.6.39.4/fs/fifo.c 2011-08-05 19:44:37.000000000 -0400
36556 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36557 */
36558 filp->f_op = &read_pipefifo_fops;
36559 pipe->r_counter++;
36560 - if (pipe->readers++ == 0)
36561 + if (atomic_inc_return(&pipe->readers) == 1)
36562 wake_up_partner(inode);
36563
36564 - if (!pipe->writers) {
36565 + if (!atomic_read(&pipe->writers)) {
36566 if ((filp->f_flags & O_NONBLOCK)) {
36567 /* suppress POLLHUP until we have
36568 * seen a writer */
36569 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36570 * errno=ENXIO when there is no process reading the FIFO.
36571 */
36572 ret = -ENXIO;
36573 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36574 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36575 goto err;
36576
36577 filp->f_op = &write_pipefifo_fops;
36578 pipe->w_counter++;
36579 - if (!pipe->writers++)
36580 + if (atomic_inc_return(&pipe->writers) == 1)
36581 wake_up_partner(inode);
36582
36583 - if (!pipe->readers) {
36584 + if (!atomic_read(&pipe->readers)) {
36585 wait_for_partner(inode, &pipe->r_counter);
36586 if (signal_pending(current))
36587 goto err_wr;
36588 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36589 */
36590 filp->f_op = &rdwr_pipefifo_fops;
36591
36592 - pipe->readers++;
36593 - pipe->writers++;
36594 + atomic_inc(&pipe->readers);
36595 + atomic_inc(&pipe->writers);
36596 pipe->r_counter++;
36597 pipe->w_counter++;
36598 - if (pipe->readers == 1 || pipe->writers == 1)
36599 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36600 wake_up_partner(inode);
36601 break;
36602
36603 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36604 return 0;
36605
36606 err_rd:
36607 - if (!--pipe->readers)
36608 + if (atomic_dec_and_test(&pipe->readers))
36609 wake_up_interruptible(&pipe->wait);
36610 ret = -ERESTARTSYS;
36611 goto err;
36612
36613 err_wr:
36614 - if (!--pipe->writers)
36615 + if (atomic_dec_and_test(&pipe->writers))
36616 wake_up_interruptible(&pipe->wait);
36617 ret = -ERESTARTSYS;
36618 goto err;
36619
36620 err:
36621 - if (!pipe->readers && !pipe->writers)
36622 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36623 free_pipe_info(inode);
36624
36625 err_nocleanup:
36626 diff -urNp linux-2.6.39.4/fs/file.c linux-2.6.39.4/fs/file.c
36627 --- linux-2.6.39.4/fs/file.c 2011-05-19 00:06:34.000000000 -0400
36628 +++ linux-2.6.39.4/fs/file.c 2011-08-05 19:44:37.000000000 -0400
36629 @@ -15,6 +15,7 @@
36630 #include <linux/slab.h>
36631 #include <linux/vmalloc.h>
36632 #include <linux/file.h>
36633 +#include <linux/security.h>
36634 #include <linux/fdtable.h>
36635 #include <linux/bitops.h>
36636 #include <linux/interrupt.h>
36637 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36638 * N.B. For clone tasks sharing a files structure, this test
36639 * will limit the total number of files that can be opened.
36640 */
36641 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36642 if (nr >= rlimit(RLIMIT_NOFILE))
36643 return -EMFILE;
36644
36645 diff -urNp linux-2.6.39.4/fs/filesystems.c linux-2.6.39.4/fs/filesystems.c
36646 --- linux-2.6.39.4/fs/filesystems.c 2011-05-19 00:06:34.000000000 -0400
36647 +++ linux-2.6.39.4/fs/filesystems.c 2011-08-05 19:44:37.000000000 -0400
36648 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36649 int len = dot ? dot - name : strlen(name);
36650
36651 fs = __get_fs_type(name, len);
36652 +
36653 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36654 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36655 +#else
36656 if (!fs && (request_module("%.*s", len, name) == 0))
36657 +#endif
36658 fs = __get_fs_type(name, len);
36659
36660 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36661 diff -urNp linux-2.6.39.4/fs/fscache/cookie.c linux-2.6.39.4/fs/fscache/cookie.c
36662 --- linux-2.6.39.4/fs/fscache/cookie.c 2011-05-19 00:06:34.000000000 -0400
36663 +++ linux-2.6.39.4/fs/fscache/cookie.c 2011-08-05 19:44:37.000000000 -0400
36664 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36665 parent ? (char *) parent->def->name : "<no-parent>",
36666 def->name, netfs_data);
36667
36668 - fscache_stat(&fscache_n_acquires);
36669 + fscache_stat_unchecked(&fscache_n_acquires);
36670
36671 /* if there's no parent cookie, then we don't create one here either */
36672 if (!parent) {
36673 - fscache_stat(&fscache_n_acquires_null);
36674 + fscache_stat_unchecked(&fscache_n_acquires_null);
36675 _leave(" [no parent]");
36676 return NULL;
36677 }
36678 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36679 /* allocate and initialise a cookie */
36680 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36681 if (!cookie) {
36682 - fscache_stat(&fscache_n_acquires_oom);
36683 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36684 _leave(" [ENOMEM]");
36685 return NULL;
36686 }
36687 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36688
36689 switch (cookie->def->type) {
36690 case FSCACHE_COOKIE_TYPE_INDEX:
36691 - fscache_stat(&fscache_n_cookie_index);
36692 + fscache_stat_unchecked(&fscache_n_cookie_index);
36693 break;
36694 case FSCACHE_COOKIE_TYPE_DATAFILE:
36695 - fscache_stat(&fscache_n_cookie_data);
36696 + fscache_stat_unchecked(&fscache_n_cookie_data);
36697 break;
36698 default:
36699 - fscache_stat(&fscache_n_cookie_special);
36700 + fscache_stat_unchecked(&fscache_n_cookie_special);
36701 break;
36702 }
36703
36704 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36705 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36706 atomic_dec(&parent->n_children);
36707 __fscache_cookie_put(cookie);
36708 - fscache_stat(&fscache_n_acquires_nobufs);
36709 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36710 _leave(" = NULL");
36711 return NULL;
36712 }
36713 }
36714
36715 - fscache_stat(&fscache_n_acquires_ok);
36716 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36717 _leave(" = %p", cookie);
36718 return cookie;
36719 }
36720 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36721 cache = fscache_select_cache_for_object(cookie->parent);
36722 if (!cache) {
36723 up_read(&fscache_addremove_sem);
36724 - fscache_stat(&fscache_n_acquires_no_cache);
36725 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36726 _leave(" = -ENOMEDIUM [no cache]");
36727 return -ENOMEDIUM;
36728 }
36729 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36730 object = cache->ops->alloc_object(cache, cookie);
36731 fscache_stat_d(&fscache_n_cop_alloc_object);
36732 if (IS_ERR(object)) {
36733 - fscache_stat(&fscache_n_object_no_alloc);
36734 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36735 ret = PTR_ERR(object);
36736 goto error;
36737 }
36738
36739 - fscache_stat(&fscache_n_object_alloc);
36740 + fscache_stat_unchecked(&fscache_n_object_alloc);
36741
36742 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36743
36744 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36745 struct fscache_object *object;
36746 struct hlist_node *_p;
36747
36748 - fscache_stat(&fscache_n_updates);
36749 + fscache_stat_unchecked(&fscache_n_updates);
36750
36751 if (!cookie) {
36752 - fscache_stat(&fscache_n_updates_null);
36753 + fscache_stat_unchecked(&fscache_n_updates_null);
36754 _leave(" [no cookie]");
36755 return;
36756 }
36757 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36758 struct fscache_object *object;
36759 unsigned long event;
36760
36761 - fscache_stat(&fscache_n_relinquishes);
36762 + fscache_stat_unchecked(&fscache_n_relinquishes);
36763 if (retire)
36764 - fscache_stat(&fscache_n_relinquishes_retire);
36765 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36766
36767 if (!cookie) {
36768 - fscache_stat(&fscache_n_relinquishes_null);
36769 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36770 _leave(" [no cookie]");
36771 return;
36772 }
36773 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36774
36775 /* wait for the cookie to finish being instantiated (or to fail) */
36776 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36777 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36778 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36779 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36780 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36781 }
36782 diff -urNp linux-2.6.39.4/fs/fscache/internal.h linux-2.6.39.4/fs/fscache/internal.h
36783 --- linux-2.6.39.4/fs/fscache/internal.h 2011-05-19 00:06:34.000000000 -0400
36784 +++ linux-2.6.39.4/fs/fscache/internal.h 2011-08-05 19:44:37.000000000 -0400
36785 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36786 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36787 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36788
36789 -extern atomic_t fscache_n_op_pend;
36790 -extern atomic_t fscache_n_op_run;
36791 -extern atomic_t fscache_n_op_enqueue;
36792 -extern atomic_t fscache_n_op_deferred_release;
36793 -extern atomic_t fscache_n_op_release;
36794 -extern atomic_t fscache_n_op_gc;
36795 -extern atomic_t fscache_n_op_cancelled;
36796 -extern atomic_t fscache_n_op_rejected;
36797 -
36798 -extern atomic_t fscache_n_attr_changed;
36799 -extern atomic_t fscache_n_attr_changed_ok;
36800 -extern atomic_t fscache_n_attr_changed_nobufs;
36801 -extern atomic_t fscache_n_attr_changed_nomem;
36802 -extern atomic_t fscache_n_attr_changed_calls;
36803 -
36804 -extern atomic_t fscache_n_allocs;
36805 -extern atomic_t fscache_n_allocs_ok;
36806 -extern atomic_t fscache_n_allocs_wait;
36807 -extern atomic_t fscache_n_allocs_nobufs;
36808 -extern atomic_t fscache_n_allocs_intr;
36809 -extern atomic_t fscache_n_allocs_object_dead;
36810 -extern atomic_t fscache_n_alloc_ops;
36811 -extern atomic_t fscache_n_alloc_op_waits;
36812 -
36813 -extern atomic_t fscache_n_retrievals;
36814 -extern atomic_t fscache_n_retrievals_ok;
36815 -extern atomic_t fscache_n_retrievals_wait;
36816 -extern atomic_t fscache_n_retrievals_nodata;
36817 -extern atomic_t fscache_n_retrievals_nobufs;
36818 -extern atomic_t fscache_n_retrievals_intr;
36819 -extern atomic_t fscache_n_retrievals_nomem;
36820 -extern atomic_t fscache_n_retrievals_object_dead;
36821 -extern atomic_t fscache_n_retrieval_ops;
36822 -extern atomic_t fscache_n_retrieval_op_waits;
36823 -
36824 -extern atomic_t fscache_n_stores;
36825 -extern atomic_t fscache_n_stores_ok;
36826 -extern atomic_t fscache_n_stores_again;
36827 -extern atomic_t fscache_n_stores_nobufs;
36828 -extern atomic_t fscache_n_stores_oom;
36829 -extern atomic_t fscache_n_store_ops;
36830 -extern atomic_t fscache_n_store_calls;
36831 -extern atomic_t fscache_n_store_pages;
36832 -extern atomic_t fscache_n_store_radix_deletes;
36833 -extern atomic_t fscache_n_store_pages_over_limit;
36834 -
36835 -extern atomic_t fscache_n_store_vmscan_not_storing;
36836 -extern atomic_t fscache_n_store_vmscan_gone;
36837 -extern atomic_t fscache_n_store_vmscan_busy;
36838 -extern atomic_t fscache_n_store_vmscan_cancelled;
36839 -
36840 -extern atomic_t fscache_n_marks;
36841 -extern atomic_t fscache_n_uncaches;
36842 -
36843 -extern atomic_t fscache_n_acquires;
36844 -extern atomic_t fscache_n_acquires_null;
36845 -extern atomic_t fscache_n_acquires_no_cache;
36846 -extern atomic_t fscache_n_acquires_ok;
36847 -extern atomic_t fscache_n_acquires_nobufs;
36848 -extern atomic_t fscache_n_acquires_oom;
36849 -
36850 -extern atomic_t fscache_n_updates;
36851 -extern atomic_t fscache_n_updates_null;
36852 -extern atomic_t fscache_n_updates_run;
36853 -
36854 -extern atomic_t fscache_n_relinquishes;
36855 -extern atomic_t fscache_n_relinquishes_null;
36856 -extern atomic_t fscache_n_relinquishes_waitcrt;
36857 -extern atomic_t fscache_n_relinquishes_retire;
36858 -
36859 -extern atomic_t fscache_n_cookie_index;
36860 -extern atomic_t fscache_n_cookie_data;
36861 -extern atomic_t fscache_n_cookie_special;
36862 -
36863 -extern atomic_t fscache_n_object_alloc;
36864 -extern atomic_t fscache_n_object_no_alloc;
36865 -extern atomic_t fscache_n_object_lookups;
36866 -extern atomic_t fscache_n_object_lookups_negative;
36867 -extern atomic_t fscache_n_object_lookups_positive;
36868 -extern atomic_t fscache_n_object_lookups_timed_out;
36869 -extern atomic_t fscache_n_object_created;
36870 -extern atomic_t fscache_n_object_avail;
36871 -extern atomic_t fscache_n_object_dead;
36872 -
36873 -extern atomic_t fscache_n_checkaux_none;
36874 -extern atomic_t fscache_n_checkaux_okay;
36875 -extern atomic_t fscache_n_checkaux_update;
36876 -extern atomic_t fscache_n_checkaux_obsolete;
36877 +extern atomic_unchecked_t fscache_n_op_pend;
36878 +extern atomic_unchecked_t fscache_n_op_run;
36879 +extern atomic_unchecked_t fscache_n_op_enqueue;
36880 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36881 +extern atomic_unchecked_t fscache_n_op_release;
36882 +extern atomic_unchecked_t fscache_n_op_gc;
36883 +extern atomic_unchecked_t fscache_n_op_cancelled;
36884 +extern atomic_unchecked_t fscache_n_op_rejected;
36885 +
36886 +extern atomic_unchecked_t fscache_n_attr_changed;
36887 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36888 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36889 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36890 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36891 +
36892 +extern atomic_unchecked_t fscache_n_allocs;
36893 +extern atomic_unchecked_t fscache_n_allocs_ok;
36894 +extern atomic_unchecked_t fscache_n_allocs_wait;
36895 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36896 +extern atomic_unchecked_t fscache_n_allocs_intr;
36897 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36898 +extern atomic_unchecked_t fscache_n_alloc_ops;
36899 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36900 +
36901 +extern atomic_unchecked_t fscache_n_retrievals;
36902 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36903 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36904 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36905 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36906 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36907 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36908 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36909 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36910 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36911 +
36912 +extern atomic_unchecked_t fscache_n_stores;
36913 +extern atomic_unchecked_t fscache_n_stores_ok;
36914 +extern atomic_unchecked_t fscache_n_stores_again;
36915 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36916 +extern atomic_unchecked_t fscache_n_stores_oom;
36917 +extern atomic_unchecked_t fscache_n_store_ops;
36918 +extern atomic_unchecked_t fscache_n_store_calls;
36919 +extern atomic_unchecked_t fscache_n_store_pages;
36920 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36921 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36922 +
36923 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36924 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36925 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36926 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36927 +
36928 +extern atomic_unchecked_t fscache_n_marks;
36929 +extern atomic_unchecked_t fscache_n_uncaches;
36930 +
36931 +extern atomic_unchecked_t fscache_n_acquires;
36932 +extern atomic_unchecked_t fscache_n_acquires_null;
36933 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36934 +extern atomic_unchecked_t fscache_n_acquires_ok;
36935 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36936 +extern atomic_unchecked_t fscache_n_acquires_oom;
36937 +
36938 +extern atomic_unchecked_t fscache_n_updates;
36939 +extern atomic_unchecked_t fscache_n_updates_null;
36940 +extern atomic_unchecked_t fscache_n_updates_run;
36941 +
36942 +extern atomic_unchecked_t fscache_n_relinquishes;
36943 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36944 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36945 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36946 +
36947 +extern atomic_unchecked_t fscache_n_cookie_index;
36948 +extern atomic_unchecked_t fscache_n_cookie_data;
36949 +extern atomic_unchecked_t fscache_n_cookie_special;
36950 +
36951 +extern atomic_unchecked_t fscache_n_object_alloc;
36952 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36953 +extern atomic_unchecked_t fscache_n_object_lookups;
36954 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36955 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36956 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36957 +extern atomic_unchecked_t fscache_n_object_created;
36958 +extern atomic_unchecked_t fscache_n_object_avail;
36959 +extern atomic_unchecked_t fscache_n_object_dead;
36960 +
36961 +extern atomic_unchecked_t fscache_n_checkaux_none;
36962 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36963 +extern atomic_unchecked_t fscache_n_checkaux_update;
36964 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36965
36966 extern atomic_t fscache_n_cop_alloc_object;
36967 extern atomic_t fscache_n_cop_lookup_object;
36968 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36969 atomic_inc(stat);
36970 }
36971
36972 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36973 +{
36974 + atomic_inc_unchecked(stat);
36975 +}
36976 +
36977 static inline void fscache_stat_d(atomic_t *stat)
36978 {
36979 atomic_dec(stat);
36980 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36981
36982 #define __fscache_stat(stat) (NULL)
36983 #define fscache_stat(stat) do {} while (0)
36984 +#define fscache_stat_unchecked(stat) do {} while (0)
36985 #define fscache_stat_d(stat) do {} while (0)
36986 #endif
36987
36988 diff -urNp linux-2.6.39.4/fs/fscache/object.c linux-2.6.39.4/fs/fscache/object.c
36989 --- linux-2.6.39.4/fs/fscache/object.c 2011-05-19 00:06:34.000000000 -0400
36990 +++ linux-2.6.39.4/fs/fscache/object.c 2011-08-05 19:44:37.000000000 -0400
36991 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36992 /* update the object metadata on disk */
36993 case FSCACHE_OBJECT_UPDATING:
36994 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36995 - fscache_stat(&fscache_n_updates_run);
36996 + fscache_stat_unchecked(&fscache_n_updates_run);
36997 fscache_stat(&fscache_n_cop_update_object);
36998 object->cache->ops->update_object(object);
36999 fscache_stat_d(&fscache_n_cop_update_object);
37000 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
37001 spin_lock(&object->lock);
37002 object->state = FSCACHE_OBJECT_DEAD;
37003 spin_unlock(&object->lock);
37004 - fscache_stat(&fscache_n_object_dead);
37005 + fscache_stat_unchecked(&fscache_n_object_dead);
37006 goto terminal_transit;
37007
37008 /* handle the parent cache of this object being withdrawn from
37009 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
37010 spin_lock(&object->lock);
37011 object->state = FSCACHE_OBJECT_DEAD;
37012 spin_unlock(&object->lock);
37013 - fscache_stat(&fscache_n_object_dead);
37014 + fscache_stat_unchecked(&fscache_n_object_dead);
37015 goto terminal_transit;
37016
37017 /* complain about the object being woken up once it is
37018 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
37019 parent->cookie->def->name, cookie->def->name,
37020 object->cache->tag->name);
37021
37022 - fscache_stat(&fscache_n_object_lookups);
37023 + fscache_stat_unchecked(&fscache_n_object_lookups);
37024 fscache_stat(&fscache_n_cop_lookup_object);
37025 ret = object->cache->ops->lookup_object(object);
37026 fscache_stat_d(&fscache_n_cop_lookup_object);
37027 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
37028 if (ret == -ETIMEDOUT) {
37029 /* probably stuck behind another object, so move this one to
37030 * the back of the queue */
37031 - fscache_stat(&fscache_n_object_lookups_timed_out);
37032 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
37033 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37034 }
37035
37036 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
37037
37038 spin_lock(&object->lock);
37039 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37040 - fscache_stat(&fscache_n_object_lookups_negative);
37041 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
37042
37043 /* transit here to allow write requests to begin stacking up
37044 * and read requests to begin returning ENODATA */
37045 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
37046 * result, in which case there may be data available */
37047 spin_lock(&object->lock);
37048 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37049 - fscache_stat(&fscache_n_object_lookups_positive);
37050 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
37051
37052 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
37053
37054 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
37055 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37056 } else {
37057 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
37058 - fscache_stat(&fscache_n_object_created);
37059 + fscache_stat_unchecked(&fscache_n_object_created);
37060
37061 object->state = FSCACHE_OBJECT_AVAILABLE;
37062 spin_unlock(&object->lock);
37063 @@ -602,7 +602,7 @@ static void fscache_object_available(str
37064 fscache_enqueue_dependents(object);
37065
37066 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
37067 - fscache_stat(&fscache_n_object_avail);
37068 + fscache_stat_unchecked(&fscache_n_object_avail);
37069
37070 _leave("");
37071 }
37072 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
37073 enum fscache_checkaux result;
37074
37075 if (!object->cookie->def->check_aux) {
37076 - fscache_stat(&fscache_n_checkaux_none);
37077 + fscache_stat_unchecked(&fscache_n_checkaux_none);
37078 return FSCACHE_CHECKAUX_OKAY;
37079 }
37080
37081 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
37082 switch (result) {
37083 /* entry okay as is */
37084 case FSCACHE_CHECKAUX_OKAY:
37085 - fscache_stat(&fscache_n_checkaux_okay);
37086 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
37087 break;
37088
37089 /* entry requires update */
37090 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
37091 - fscache_stat(&fscache_n_checkaux_update);
37092 + fscache_stat_unchecked(&fscache_n_checkaux_update);
37093 break;
37094
37095 /* entry requires deletion */
37096 case FSCACHE_CHECKAUX_OBSOLETE:
37097 - fscache_stat(&fscache_n_checkaux_obsolete);
37098 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
37099 break;
37100
37101 default:
37102 diff -urNp linux-2.6.39.4/fs/fscache/operation.c linux-2.6.39.4/fs/fscache/operation.c
37103 --- linux-2.6.39.4/fs/fscache/operation.c 2011-05-19 00:06:34.000000000 -0400
37104 +++ linux-2.6.39.4/fs/fscache/operation.c 2011-08-05 19:44:37.000000000 -0400
37105 @@ -17,7 +17,7 @@
37106 #include <linux/slab.h>
37107 #include "internal.h"
37108
37109 -atomic_t fscache_op_debug_id;
37110 +atomic_unchecked_t fscache_op_debug_id;
37111 EXPORT_SYMBOL(fscache_op_debug_id);
37112
37113 /**
37114 @@ -40,7 +40,7 @@ void fscache_enqueue_operation(struct fs
37115 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
37116 ASSERTCMP(atomic_read(&op->usage), >, 0);
37117
37118 - fscache_stat(&fscache_n_op_enqueue);
37119 + fscache_stat_unchecked(&fscache_n_op_enqueue);
37120 switch (op->flags & FSCACHE_OP_TYPE) {
37121 case FSCACHE_OP_ASYNC:
37122 _debug("queue async");
37123 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
37124 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37125 if (op->processor)
37126 fscache_enqueue_operation(op);
37127 - fscache_stat(&fscache_n_op_run);
37128 + fscache_stat_unchecked(&fscache_n_op_run);
37129 }
37130
37131 /*
37132 @@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct f
37133 if (object->n_ops > 1) {
37134 atomic_inc(&op->usage);
37135 list_add_tail(&op->pend_link, &object->pending_ops);
37136 - fscache_stat(&fscache_n_op_pend);
37137 + fscache_stat_unchecked(&fscache_n_op_pend);
37138 } else if (!list_empty(&object->pending_ops)) {
37139 atomic_inc(&op->usage);
37140 list_add_tail(&op->pend_link, &object->pending_ops);
37141 - fscache_stat(&fscache_n_op_pend);
37142 + fscache_stat_unchecked(&fscache_n_op_pend);
37143 fscache_start_operations(object);
37144 } else {
37145 ASSERTCMP(object->n_in_progress, ==, 0);
37146 @@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct f
37147 object->n_exclusive++; /* reads and writes must wait */
37148 atomic_inc(&op->usage);
37149 list_add_tail(&op->pend_link, &object->pending_ops);
37150 - fscache_stat(&fscache_n_op_pend);
37151 + fscache_stat_unchecked(&fscache_n_op_pend);
37152 ret = 0;
37153 } else {
37154 /* not allowed to submit ops in any other state */
37155 @@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_obj
37156 if (object->n_exclusive > 0) {
37157 atomic_inc(&op->usage);
37158 list_add_tail(&op->pend_link, &object->pending_ops);
37159 - fscache_stat(&fscache_n_op_pend);
37160 + fscache_stat_unchecked(&fscache_n_op_pend);
37161 } else if (!list_empty(&object->pending_ops)) {
37162 atomic_inc(&op->usage);
37163 list_add_tail(&op->pend_link, &object->pending_ops);
37164 - fscache_stat(&fscache_n_op_pend);
37165 + fscache_stat_unchecked(&fscache_n_op_pend);
37166 fscache_start_operations(object);
37167 } else {
37168 ASSERTCMP(object->n_exclusive, ==, 0);
37169 @@ -227,12 +227,12 @@ int fscache_submit_op(struct fscache_obj
37170 object->n_ops++;
37171 atomic_inc(&op->usage);
37172 list_add_tail(&op->pend_link, &object->pending_ops);
37173 - fscache_stat(&fscache_n_op_pend);
37174 + fscache_stat_unchecked(&fscache_n_op_pend);
37175 ret = 0;
37176 } else if (object->state == FSCACHE_OBJECT_DYING ||
37177 object->state == FSCACHE_OBJECT_LC_DYING ||
37178 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37179 - fscache_stat(&fscache_n_op_rejected);
37180 + fscache_stat_unchecked(&fscache_n_op_rejected);
37181 ret = -ENOBUFS;
37182 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37183 fscache_report_unexpected_submission(object, op, ostate);
37184 @@ -302,7 +302,7 @@ int fscache_cancel_op(struct fscache_ope
37185
37186 ret = -EBUSY;
37187 if (!list_empty(&op->pend_link)) {
37188 - fscache_stat(&fscache_n_op_cancelled);
37189 + fscache_stat_unchecked(&fscache_n_op_cancelled);
37190 list_del_init(&op->pend_link);
37191 object->n_ops--;
37192 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37193 @@ -341,7 +341,7 @@ void fscache_put_operation(struct fscach
37194 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37195 BUG();
37196
37197 - fscache_stat(&fscache_n_op_release);
37198 + fscache_stat_unchecked(&fscache_n_op_release);
37199
37200 if (op->release) {
37201 op->release(op);
37202 @@ -358,7 +358,7 @@ void fscache_put_operation(struct fscach
37203 * lock, and defer it otherwise */
37204 if (!spin_trylock(&object->lock)) {
37205 _debug("defer put");
37206 - fscache_stat(&fscache_n_op_deferred_release);
37207 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
37208
37209 cache = object->cache;
37210 spin_lock(&cache->op_gc_list_lock);
37211 @@ -420,7 +420,7 @@ void fscache_operation_gc(struct work_st
37212
37213 _debug("GC DEFERRED REL OBJ%x OP%x",
37214 object->debug_id, op->debug_id);
37215 - fscache_stat(&fscache_n_op_gc);
37216 + fscache_stat_unchecked(&fscache_n_op_gc);
37217
37218 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37219
37220 diff -urNp linux-2.6.39.4/fs/fscache/page.c linux-2.6.39.4/fs/fscache/page.c
37221 --- linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:11:51.000000000 -0400
37222 +++ linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:12:20.000000000 -0400
37223 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37224 val = radix_tree_lookup(&cookie->stores, page->index);
37225 if (!val) {
37226 rcu_read_unlock();
37227 - fscache_stat(&fscache_n_store_vmscan_not_storing);
37228 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37229 __fscache_uncache_page(cookie, page);
37230 return true;
37231 }
37232 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37233 spin_unlock(&cookie->stores_lock);
37234
37235 if (xpage) {
37236 - fscache_stat(&fscache_n_store_vmscan_cancelled);
37237 - fscache_stat(&fscache_n_store_radix_deletes);
37238 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37239 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37240 ASSERTCMP(xpage, ==, page);
37241 } else {
37242 - fscache_stat(&fscache_n_store_vmscan_gone);
37243 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37244 }
37245
37246 wake_up_bit(&cookie->flags, 0);
37247 @@ -107,7 +107,7 @@ page_busy:
37248 /* we might want to wait here, but that could deadlock the allocator as
37249 * the work threads writing to the cache may all end up sleeping
37250 * on memory allocation */
37251 - fscache_stat(&fscache_n_store_vmscan_busy);
37252 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37253 return false;
37254 }
37255 EXPORT_SYMBOL(__fscache_maybe_release_page);
37256 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37257 FSCACHE_COOKIE_STORING_TAG);
37258 if (!radix_tree_tag_get(&cookie->stores, page->index,
37259 FSCACHE_COOKIE_PENDING_TAG)) {
37260 - fscache_stat(&fscache_n_store_radix_deletes);
37261 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37262 xpage = radix_tree_delete(&cookie->stores, page->index);
37263 }
37264 spin_unlock(&cookie->stores_lock);
37265 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37266
37267 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37268
37269 - fscache_stat(&fscache_n_attr_changed_calls);
37270 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37271
37272 if (fscache_object_is_active(object)) {
37273 fscache_set_op_state(op, "CallFS");
37274 @@ -179,11 +179,11 @@ int __fscache_attr_changed(struct fscach
37275
37276 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37277
37278 - fscache_stat(&fscache_n_attr_changed);
37279 + fscache_stat_unchecked(&fscache_n_attr_changed);
37280
37281 op = kzalloc(sizeof(*op), GFP_KERNEL);
37282 if (!op) {
37283 - fscache_stat(&fscache_n_attr_changed_nomem);
37284 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37285 _leave(" = -ENOMEM");
37286 return -ENOMEM;
37287 }
37288 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
37289 if (fscache_submit_exclusive_op(object, op) < 0)
37290 goto nobufs;
37291 spin_unlock(&cookie->lock);
37292 - fscache_stat(&fscache_n_attr_changed_ok);
37293 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37294 fscache_put_operation(op);
37295 _leave(" = 0");
37296 return 0;
37297 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
37298 nobufs:
37299 spin_unlock(&cookie->lock);
37300 kfree(op);
37301 - fscache_stat(&fscache_n_attr_changed_nobufs);
37302 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37303 _leave(" = %d", -ENOBUFS);
37304 return -ENOBUFS;
37305 }
37306 @@ -246,7 +246,7 @@ static struct fscache_retrieval *fscache
37307 /* allocate a retrieval operation and attempt to submit it */
37308 op = kzalloc(sizeof(*op), GFP_NOIO);
37309 if (!op) {
37310 - fscache_stat(&fscache_n_retrievals_nomem);
37311 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37312 return NULL;
37313 }
37314
37315 @@ -275,13 +275,13 @@ static int fscache_wait_for_deferred_loo
37316 return 0;
37317 }
37318
37319 - fscache_stat(&fscache_n_retrievals_wait);
37320 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
37321
37322 jif = jiffies;
37323 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37324 fscache_wait_bit_interruptible,
37325 TASK_INTERRUPTIBLE) != 0) {
37326 - fscache_stat(&fscache_n_retrievals_intr);
37327 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37328 _leave(" = -ERESTARTSYS");
37329 return -ERESTARTSYS;
37330 }
37331 @@ -299,8 +299,8 @@ static int fscache_wait_for_deferred_loo
37332 */
37333 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37334 struct fscache_retrieval *op,
37335 - atomic_t *stat_op_waits,
37336 - atomic_t *stat_object_dead)
37337 + atomic_unchecked_t *stat_op_waits,
37338 + atomic_unchecked_t *stat_object_dead)
37339 {
37340 int ret;
37341
37342 @@ -308,7 +308,7 @@ static int fscache_wait_for_retrieval_ac
37343 goto check_if_dead;
37344
37345 _debug(">>> WT");
37346 - fscache_stat(stat_op_waits);
37347 + fscache_stat_unchecked(stat_op_waits);
37348 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37349 fscache_wait_bit_interruptible,
37350 TASK_INTERRUPTIBLE) < 0) {
37351 @@ -325,7 +325,7 @@ static int fscache_wait_for_retrieval_ac
37352
37353 check_if_dead:
37354 if (unlikely(fscache_object_is_dead(object))) {
37355 - fscache_stat(stat_object_dead);
37356 + fscache_stat_unchecked(stat_object_dead);
37357 return -ENOBUFS;
37358 }
37359 return 0;
37360 @@ -352,7 +352,7 @@ int __fscache_read_or_alloc_page(struct
37361
37362 _enter("%p,%p,,,", cookie, page);
37363
37364 - fscache_stat(&fscache_n_retrievals);
37365 + fscache_stat_unchecked(&fscache_n_retrievals);
37366
37367 if (hlist_empty(&cookie->backing_objects))
37368 goto nobufs;
37369 @@ -386,7 +386,7 @@ int __fscache_read_or_alloc_page(struct
37370 goto nobufs_unlock;
37371 spin_unlock(&cookie->lock);
37372
37373 - fscache_stat(&fscache_n_retrieval_ops);
37374 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37375
37376 /* pin the netfs read context in case we need to do the actual netfs
37377 * read because we've encountered a cache read failure */
37378 @@ -416,15 +416,15 @@ int __fscache_read_or_alloc_page(struct
37379
37380 error:
37381 if (ret == -ENOMEM)
37382 - fscache_stat(&fscache_n_retrievals_nomem);
37383 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37384 else if (ret == -ERESTARTSYS)
37385 - fscache_stat(&fscache_n_retrievals_intr);
37386 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37387 else if (ret == -ENODATA)
37388 - fscache_stat(&fscache_n_retrievals_nodata);
37389 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37390 else if (ret < 0)
37391 - fscache_stat(&fscache_n_retrievals_nobufs);
37392 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37393 else
37394 - fscache_stat(&fscache_n_retrievals_ok);
37395 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37396
37397 fscache_put_retrieval(op);
37398 _leave(" = %d", ret);
37399 @@ -434,7 +434,7 @@ nobufs_unlock:
37400 spin_unlock(&cookie->lock);
37401 kfree(op);
37402 nobufs:
37403 - fscache_stat(&fscache_n_retrievals_nobufs);
37404 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37405 _leave(" = -ENOBUFS");
37406 return -ENOBUFS;
37407 }
37408 @@ -472,7 +472,7 @@ int __fscache_read_or_alloc_pages(struct
37409
37410 _enter("%p,,%d,,,", cookie, *nr_pages);
37411
37412 - fscache_stat(&fscache_n_retrievals);
37413 + fscache_stat_unchecked(&fscache_n_retrievals);
37414
37415 if (hlist_empty(&cookie->backing_objects))
37416 goto nobufs;
37417 @@ -503,7 +503,7 @@ int __fscache_read_or_alloc_pages(struct
37418 goto nobufs_unlock;
37419 spin_unlock(&cookie->lock);
37420
37421 - fscache_stat(&fscache_n_retrieval_ops);
37422 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37423
37424 /* pin the netfs read context in case we need to do the actual netfs
37425 * read because we've encountered a cache read failure */
37426 @@ -533,15 +533,15 @@ int __fscache_read_or_alloc_pages(struct
37427
37428 error:
37429 if (ret == -ENOMEM)
37430 - fscache_stat(&fscache_n_retrievals_nomem);
37431 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37432 else if (ret == -ERESTARTSYS)
37433 - fscache_stat(&fscache_n_retrievals_intr);
37434 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37435 else if (ret == -ENODATA)
37436 - fscache_stat(&fscache_n_retrievals_nodata);
37437 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37438 else if (ret < 0)
37439 - fscache_stat(&fscache_n_retrievals_nobufs);
37440 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37441 else
37442 - fscache_stat(&fscache_n_retrievals_ok);
37443 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37444
37445 fscache_put_retrieval(op);
37446 _leave(" = %d", ret);
37447 @@ -551,7 +551,7 @@ nobufs_unlock:
37448 spin_unlock(&cookie->lock);
37449 kfree(op);
37450 nobufs:
37451 - fscache_stat(&fscache_n_retrievals_nobufs);
37452 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37453 _leave(" = -ENOBUFS");
37454 return -ENOBUFS;
37455 }
37456 @@ -575,7 +575,7 @@ int __fscache_alloc_page(struct fscache_
37457
37458 _enter("%p,%p,,,", cookie, page);
37459
37460 - fscache_stat(&fscache_n_allocs);
37461 + fscache_stat_unchecked(&fscache_n_allocs);
37462
37463 if (hlist_empty(&cookie->backing_objects))
37464 goto nobufs;
37465 @@ -602,7 +602,7 @@ int __fscache_alloc_page(struct fscache_
37466 goto nobufs_unlock;
37467 spin_unlock(&cookie->lock);
37468
37469 - fscache_stat(&fscache_n_alloc_ops);
37470 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37471
37472 ret = fscache_wait_for_retrieval_activation(
37473 object, op,
37474 @@ -618,11 +618,11 @@ int __fscache_alloc_page(struct fscache_
37475
37476 error:
37477 if (ret == -ERESTARTSYS)
37478 - fscache_stat(&fscache_n_allocs_intr);
37479 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37480 else if (ret < 0)
37481 - fscache_stat(&fscache_n_allocs_nobufs);
37482 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37483 else
37484 - fscache_stat(&fscache_n_allocs_ok);
37485 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37486
37487 fscache_put_retrieval(op);
37488 _leave(" = %d", ret);
37489 @@ -632,7 +632,7 @@ nobufs_unlock:
37490 spin_unlock(&cookie->lock);
37491 kfree(op);
37492 nobufs:
37493 - fscache_stat(&fscache_n_allocs_nobufs);
37494 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37495 _leave(" = -ENOBUFS");
37496 return -ENOBUFS;
37497 }
37498 @@ -675,7 +675,7 @@ static void fscache_write_op(struct fsca
37499
37500 spin_lock(&cookie->stores_lock);
37501
37502 - fscache_stat(&fscache_n_store_calls);
37503 + fscache_stat_unchecked(&fscache_n_store_calls);
37504
37505 /* find a page to store */
37506 page = NULL;
37507 @@ -686,7 +686,7 @@ static void fscache_write_op(struct fsca
37508 page = results[0];
37509 _debug("gang %d [%lx]", n, page->index);
37510 if (page->index > op->store_limit) {
37511 - fscache_stat(&fscache_n_store_pages_over_limit);
37512 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37513 goto superseded;
37514 }
37515
37516 @@ -699,7 +699,7 @@ static void fscache_write_op(struct fsca
37517 spin_unlock(&object->lock);
37518
37519 fscache_set_op_state(&op->op, "Store");
37520 - fscache_stat(&fscache_n_store_pages);
37521 + fscache_stat_unchecked(&fscache_n_store_pages);
37522 fscache_stat(&fscache_n_cop_write_page);
37523 ret = object->cache->ops->write_page(op, page);
37524 fscache_stat_d(&fscache_n_cop_write_page);
37525 @@ -769,7 +769,7 @@ int __fscache_write_page(struct fscache_
37526 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37527 ASSERT(PageFsCache(page));
37528
37529 - fscache_stat(&fscache_n_stores);
37530 + fscache_stat_unchecked(&fscache_n_stores);
37531
37532 op = kzalloc(sizeof(*op), GFP_NOIO);
37533 if (!op)
37534 @@ -821,7 +821,7 @@ int __fscache_write_page(struct fscache_
37535 spin_unlock(&cookie->stores_lock);
37536 spin_unlock(&object->lock);
37537
37538 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37539 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37540 op->store_limit = object->store_limit;
37541
37542 if (fscache_submit_op(object, &op->op) < 0)
37543 @@ -829,8 +829,8 @@ int __fscache_write_page(struct fscache_
37544
37545 spin_unlock(&cookie->lock);
37546 radix_tree_preload_end();
37547 - fscache_stat(&fscache_n_store_ops);
37548 - fscache_stat(&fscache_n_stores_ok);
37549 + fscache_stat_unchecked(&fscache_n_store_ops);
37550 + fscache_stat_unchecked(&fscache_n_stores_ok);
37551
37552 /* the work queue now carries its own ref on the object */
37553 fscache_put_operation(&op->op);
37554 @@ -838,14 +838,14 @@ int __fscache_write_page(struct fscache_
37555 return 0;
37556
37557 already_queued:
37558 - fscache_stat(&fscache_n_stores_again);
37559 + fscache_stat_unchecked(&fscache_n_stores_again);
37560 already_pending:
37561 spin_unlock(&cookie->stores_lock);
37562 spin_unlock(&object->lock);
37563 spin_unlock(&cookie->lock);
37564 radix_tree_preload_end();
37565 kfree(op);
37566 - fscache_stat(&fscache_n_stores_ok);
37567 + fscache_stat_unchecked(&fscache_n_stores_ok);
37568 _leave(" = 0");
37569 return 0;
37570
37571 @@ -864,14 +864,14 @@ nobufs:
37572 spin_unlock(&cookie->lock);
37573 radix_tree_preload_end();
37574 kfree(op);
37575 - fscache_stat(&fscache_n_stores_nobufs);
37576 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37577 _leave(" = -ENOBUFS");
37578 return -ENOBUFS;
37579
37580 nomem_free:
37581 kfree(op);
37582 nomem:
37583 - fscache_stat(&fscache_n_stores_oom);
37584 + fscache_stat_unchecked(&fscache_n_stores_oom);
37585 _leave(" = -ENOMEM");
37586 return -ENOMEM;
37587 }
37588 @@ -889,7 +889,7 @@ void __fscache_uncache_page(struct fscac
37589 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37590 ASSERTCMP(page, !=, NULL);
37591
37592 - fscache_stat(&fscache_n_uncaches);
37593 + fscache_stat_unchecked(&fscache_n_uncaches);
37594
37595 /* cache withdrawal may beat us to it */
37596 if (!PageFsCache(page))
37597 @@ -942,7 +942,7 @@ void fscache_mark_pages_cached(struct fs
37598 unsigned long loop;
37599
37600 #ifdef CONFIG_FSCACHE_STATS
37601 - atomic_add(pagevec->nr, &fscache_n_marks);
37602 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37603 #endif
37604
37605 for (loop = 0; loop < pagevec->nr; loop++) {
37606 diff -urNp linux-2.6.39.4/fs/fscache/stats.c linux-2.6.39.4/fs/fscache/stats.c
37607 --- linux-2.6.39.4/fs/fscache/stats.c 2011-05-19 00:06:34.000000000 -0400
37608 +++ linux-2.6.39.4/fs/fscache/stats.c 2011-08-05 19:44:37.000000000 -0400
37609 @@ -18,95 +18,95 @@
37610 /*
37611 * operation counters
37612 */
37613 -atomic_t fscache_n_op_pend;
37614 -atomic_t fscache_n_op_run;
37615 -atomic_t fscache_n_op_enqueue;
37616 -atomic_t fscache_n_op_requeue;
37617 -atomic_t fscache_n_op_deferred_release;
37618 -atomic_t fscache_n_op_release;
37619 -atomic_t fscache_n_op_gc;
37620 -atomic_t fscache_n_op_cancelled;
37621 -atomic_t fscache_n_op_rejected;
37622 -
37623 -atomic_t fscache_n_attr_changed;
37624 -atomic_t fscache_n_attr_changed_ok;
37625 -atomic_t fscache_n_attr_changed_nobufs;
37626 -atomic_t fscache_n_attr_changed_nomem;
37627 -atomic_t fscache_n_attr_changed_calls;
37628 -
37629 -atomic_t fscache_n_allocs;
37630 -atomic_t fscache_n_allocs_ok;
37631 -atomic_t fscache_n_allocs_wait;
37632 -atomic_t fscache_n_allocs_nobufs;
37633 -atomic_t fscache_n_allocs_intr;
37634 -atomic_t fscache_n_allocs_object_dead;
37635 -atomic_t fscache_n_alloc_ops;
37636 -atomic_t fscache_n_alloc_op_waits;
37637 -
37638 -atomic_t fscache_n_retrievals;
37639 -atomic_t fscache_n_retrievals_ok;
37640 -atomic_t fscache_n_retrievals_wait;
37641 -atomic_t fscache_n_retrievals_nodata;
37642 -atomic_t fscache_n_retrievals_nobufs;
37643 -atomic_t fscache_n_retrievals_intr;
37644 -atomic_t fscache_n_retrievals_nomem;
37645 -atomic_t fscache_n_retrievals_object_dead;
37646 -atomic_t fscache_n_retrieval_ops;
37647 -atomic_t fscache_n_retrieval_op_waits;
37648 -
37649 -atomic_t fscache_n_stores;
37650 -atomic_t fscache_n_stores_ok;
37651 -atomic_t fscache_n_stores_again;
37652 -atomic_t fscache_n_stores_nobufs;
37653 -atomic_t fscache_n_stores_oom;
37654 -atomic_t fscache_n_store_ops;
37655 -atomic_t fscache_n_store_calls;
37656 -atomic_t fscache_n_store_pages;
37657 -atomic_t fscache_n_store_radix_deletes;
37658 -atomic_t fscache_n_store_pages_over_limit;
37659 -
37660 -atomic_t fscache_n_store_vmscan_not_storing;
37661 -atomic_t fscache_n_store_vmscan_gone;
37662 -atomic_t fscache_n_store_vmscan_busy;
37663 -atomic_t fscache_n_store_vmscan_cancelled;
37664 -
37665 -atomic_t fscache_n_marks;
37666 -atomic_t fscache_n_uncaches;
37667 -
37668 -atomic_t fscache_n_acquires;
37669 -atomic_t fscache_n_acquires_null;
37670 -atomic_t fscache_n_acquires_no_cache;
37671 -atomic_t fscache_n_acquires_ok;
37672 -atomic_t fscache_n_acquires_nobufs;
37673 -atomic_t fscache_n_acquires_oom;
37674 -
37675 -atomic_t fscache_n_updates;
37676 -atomic_t fscache_n_updates_null;
37677 -atomic_t fscache_n_updates_run;
37678 -
37679 -atomic_t fscache_n_relinquishes;
37680 -atomic_t fscache_n_relinquishes_null;
37681 -atomic_t fscache_n_relinquishes_waitcrt;
37682 -atomic_t fscache_n_relinquishes_retire;
37683 -
37684 -atomic_t fscache_n_cookie_index;
37685 -atomic_t fscache_n_cookie_data;
37686 -atomic_t fscache_n_cookie_special;
37687 -
37688 -atomic_t fscache_n_object_alloc;
37689 -atomic_t fscache_n_object_no_alloc;
37690 -atomic_t fscache_n_object_lookups;
37691 -atomic_t fscache_n_object_lookups_negative;
37692 -atomic_t fscache_n_object_lookups_positive;
37693 -atomic_t fscache_n_object_lookups_timed_out;
37694 -atomic_t fscache_n_object_created;
37695 -atomic_t fscache_n_object_avail;
37696 -atomic_t fscache_n_object_dead;
37697 -
37698 -atomic_t fscache_n_checkaux_none;
37699 -atomic_t fscache_n_checkaux_okay;
37700 -atomic_t fscache_n_checkaux_update;
37701 -atomic_t fscache_n_checkaux_obsolete;
37702 +atomic_unchecked_t fscache_n_op_pend;
37703 +atomic_unchecked_t fscache_n_op_run;
37704 +atomic_unchecked_t fscache_n_op_enqueue;
37705 +atomic_unchecked_t fscache_n_op_requeue;
37706 +atomic_unchecked_t fscache_n_op_deferred_release;
37707 +atomic_unchecked_t fscache_n_op_release;
37708 +atomic_unchecked_t fscache_n_op_gc;
37709 +atomic_unchecked_t fscache_n_op_cancelled;
37710 +atomic_unchecked_t fscache_n_op_rejected;
37711 +
37712 +atomic_unchecked_t fscache_n_attr_changed;
37713 +atomic_unchecked_t fscache_n_attr_changed_ok;
37714 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37715 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37716 +atomic_unchecked_t fscache_n_attr_changed_calls;
37717 +
37718 +atomic_unchecked_t fscache_n_allocs;
37719 +atomic_unchecked_t fscache_n_allocs_ok;
37720 +atomic_unchecked_t fscache_n_allocs_wait;
37721 +atomic_unchecked_t fscache_n_allocs_nobufs;
37722 +atomic_unchecked_t fscache_n_allocs_intr;
37723 +atomic_unchecked_t fscache_n_allocs_object_dead;
37724 +atomic_unchecked_t fscache_n_alloc_ops;
37725 +atomic_unchecked_t fscache_n_alloc_op_waits;
37726 +
37727 +atomic_unchecked_t fscache_n_retrievals;
37728 +atomic_unchecked_t fscache_n_retrievals_ok;
37729 +atomic_unchecked_t fscache_n_retrievals_wait;
37730 +atomic_unchecked_t fscache_n_retrievals_nodata;
37731 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37732 +atomic_unchecked_t fscache_n_retrievals_intr;
37733 +atomic_unchecked_t fscache_n_retrievals_nomem;
37734 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37735 +atomic_unchecked_t fscache_n_retrieval_ops;
37736 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37737 +
37738 +atomic_unchecked_t fscache_n_stores;
37739 +atomic_unchecked_t fscache_n_stores_ok;
37740 +atomic_unchecked_t fscache_n_stores_again;
37741 +atomic_unchecked_t fscache_n_stores_nobufs;
37742 +atomic_unchecked_t fscache_n_stores_oom;
37743 +atomic_unchecked_t fscache_n_store_ops;
37744 +atomic_unchecked_t fscache_n_store_calls;
37745 +atomic_unchecked_t fscache_n_store_pages;
37746 +atomic_unchecked_t fscache_n_store_radix_deletes;
37747 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37748 +
37749 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37750 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37751 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37752 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37753 +
37754 +atomic_unchecked_t fscache_n_marks;
37755 +atomic_unchecked_t fscache_n_uncaches;
37756 +
37757 +atomic_unchecked_t fscache_n_acquires;
37758 +atomic_unchecked_t fscache_n_acquires_null;
37759 +atomic_unchecked_t fscache_n_acquires_no_cache;
37760 +atomic_unchecked_t fscache_n_acquires_ok;
37761 +atomic_unchecked_t fscache_n_acquires_nobufs;
37762 +atomic_unchecked_t fscache_n_acquires_oom;
37763 +
37764 +atomic_unchecked_t fscache_n_updates;
37765 +atomic_unchecked_t fscache_n_updates_null;
37766 +atomic_unchecked_t fscache_n_updates_run;
37767 +
37768 +atomic_unchecked_t fscache_n_relinquishes;
37769 +atomic_unchecked_t fscache_n_relinquishes_null;
37770 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37771 +atomic_unchecked_t fscache_n_relinquishes_retire;
37772 +
37773 +atomic_unchecked_t fscache_n_cookie_index;
37774 +atomic_unchecked_t fscache_n_cookie_data;
37775 +atomic_unchecked_t fscache_n_cookie_special;
37776 +
37777 +atomic_unchecked_t fscache_n_object_alloc;
37778 +atomic_unchecked_t fscache_n_object_no_alloc;
37779 +atomic_unchecked_t fscache_n_object_lookups;
37780 +atomic_unchecked_t fscache_n_object_lookups_negative;
37781 +atomic_unchecked_t fscache_n_object_lookups_positive;
37782 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37783 +atomic_unchecked_t fscache_n_object_created;
37784 +atomic_unchecked_t fscache_n_object_avail;
37785 +atomic_unchecked_t fscache_n_object_dead;
37786 +
37787 +atomic_unchecked_t fscache_n_checkaux_none;
37788 +atomic_unchecked_t fscache_n_checkaux_okay;
37789 +atomic_unchecked_t fscache_n_checkaux_update;
37790 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37791
37792 atomic_t fscache_n_cop_alloc_object;
37793 atomic_t fscache_n_cop_lookup_object;
37794 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37795 seq_puts(m, "FS-Cache statistics\n");
37796
37797 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37798 - atomic_read(&fscache_n_cookie_index),
37799 - atomic_read(&fscache_n_cookie_data),
37800 - atomic_read(&fscache_n_cookie_special));
37801 + atomic_read_unchecked(&fscache_n_cookie_index),
37802 + atomic_read_unchecked(&fscache_n_cookie_data),
37803 + atomic_read_unchecked(&fscache_n_cookie_special));
37804
37805 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37806 - atomic_read(&fscache_n_object_alloc),
37807 - atomic_read(&fscache_n_object_no_alloc),
37808 - atomic_read(&fscache_n_object_avail),
37809 - atomic_read(&fscache_n_object_dead));
37810 + atomic_read_unchecked(&fscache_n_object_alloc),
37811 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37812 + atomic_read_unchecked(&fscache_n_object_avail),
37813 + atomic_read_unchecked(&fscache_n_object_dead));
37814 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37815 - atomic_read(&fscache_n_checkaux_none),
37816 - atomic_read(&fscache_n_checkaux_okay),
37817 - atomic_read(&fscache_n_checkaux_update),
37818 - atomic_read(&fscache_n_checkaux_obsolete));
37819 + atomic_read_unchecked(&fscache_n_checkaux_none),
37820 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37821 + atomic_read_unchecked(&fscache_n_checkaux_update),
37822 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37823
37824 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37825 - atomic_read(&fscache_n_marks),
37826 - atomic_read(&fscache_n_uncaches));
37827 + atomic_read_unchecked(&fscache_n_marks),
37828 + atomic_read_unchecked(&fscache_n_uncaches));
37829
37830 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37831 " oom=%u\n",
37832 - atomic_read(&fscache_n_acquires),
37833 - atomic_read(&fscache_n_acquires_null),
37834 - atomic_read(&fscache_n_acquires_no_cache),
37835 - atomic_read(&fscache_n_acquires_ok),
37836 - atomic_read(&fscache_n_acquires_nobufs),
37837 - atomic_read(&fscache_n_acquires_oom));
37838 + atomic_read_unchecked(&fscache_n_acquires),
37839 + atomic_read_unchecked(&fscache_n_acquires_null),
37840 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37841 + atomic_read_unchecked(&fscache_n_acquires_ok),
37842 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37843 + atomic_read_unchecked(&fscache_n_acquires_oom));
37844
37845 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37846 - atomic_read(&fscache_n_object_lookups),
37847 - atomic_read(&fscache_n_object_lookups_negative),
37848 - atomic_read(&fscache_n_object_lookups_positive),
37849 - atomic_read(&fscache_n_object_created),
37850 - atomic_read(&fscache_n_object_lookups_timed_out));
37851 + atomic_read_unchecked(&fscache_n_object_lookups),
37852 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37853 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37854 + atomic_read_unchecked(&fscache_n_object_created),
37855 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37856
37857 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37858 - atomic_read(&fscache_n_updates),
37859 - atomic_read(&fscache_n_updates_null),
37860 - atomic_read(&fscache_n_updates_run));
37861 + atomic_read_unchecked(&fscache_n_updates),
37862 + atomic_read_unchecked(&fscache_n_updates_null),
37863 + atomic_read_unchecked(&fscache_n_updates_run));
37864
37865 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37866 - atomic_read(&fscache_n_relinquishes),
37867 - atomic_read(&fscache_n_relinquishes_null),
37868 - atomic_read(&fscache_n_relinquishes_waitcrt),
37869 - atomic_read(&fscache_n_relinquishes_retire));
37870 + atomic_read_unchecked(&fscache_n_relinquishes),
37871 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37872 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37873 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37874
37875 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37876 - atomic_read(&fscache_n_attr_changed),
37877 - atomic_read(&fscache_n_attr_changed_ok),
37878 - atomic_read(&fscache_n_attr_changed_nobufs),
37879 - atomic_read(&fscache_n_attr_changed_nomem),
37880 - atomic_read(&fscache_n_attr_changed_calls));
37881 + atomic_read_unchecked(&fscache_n_attr_changed),
37882 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37883 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37884 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37885 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37886
37887 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37888 - atomic_read(&fscache_n_allocs),
37889 - atomic_read(&fscache_n_allocs_ok),
37890 - atomic_read(&fscache_n_allocs_wait),
37891 - atomic_read(&fscache_n_allocs_nobufs),
37892 - atomic_read(&fscache_n_allocs_intr));
37893 + atomic_read_unchecked(&fscache_n_allocs),
37894 + atomic_read_unchecked(&fscache_n_allocs_ok),
37895 + atomic_read_unchecked(&fscache_n_allocs_wait),
37896 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37897 + atomic_read_unchecked(&fscache_n_allocs_intr));
37898 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37899 - atomic_read(&fscache_n_alloc_ops),
37900 - atomic_read(&fscache_n_alloc_op_waits),
37901 - atomic_read(&fscache_n_allocs_object_dead));
37902 + atomic_read_unchecked(&fscache_n_alloc_ops),
37903 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37904 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37905
37906 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37907 " int=%u oom=%u\n",
37908 - atomic_read(&fscache_n_retrievals),
37909 - atomic_read(&fscache_n_retrievals_ok),
37910 - atomic_read(&fscache_n_retrievals_wait),
37911 - atomic_read(&fscache_n_retrievals_nodata),
37912 - atomic_read(&fscache_n_retrievals_nobufs),
37913 - atomic_read(&fscache_n_retrievals_intr),
37914 - atomic_read(&fscache_n_retrievals_nomem));
37915 + atomic_read_unchecked(&fscache_n_retrievals),
37916 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37917 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37918 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37919 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37920 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37921 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37922 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37923 - atomic_read(&fscache_n_retrieval_ops),
37924 - atomic_read(&fscache_n_retrieval_op_waits),
37925 - atomic_read(&fscache_n_retrievals_object_dead));
37926 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37927 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37928 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37929
37930 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37931 - atomic_read(&fscache_n_stores),
37932 - atomic_read(&fscache_n_stores_ok),
37933 - atomic_read(&fscache_n_stores_again),
37934 - atomic_read(&fscache_n_stores_nobufs),
37935 - atomic_read(&fscache_n_stores_oom));
37936 + atomic_read_unchecked(&fscache_n_stores),
37937 + atomic_read_unchecked(&fscache_n_stores_ok),
37938 + atomic_read_unchecked(&fscache_n_stores_again),
37939 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37940 + atomic_read_unchecked(&fscache_n_stores_oom));
37941 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37942 - atomic_read(&fscache_n_store_ops),
37943 - atomic_read(&fscache_n_store_calls),
37944 - atomic_read(&fscache_n_store_pages),
37945 - atomic_read(&fscache_n_store_radix_deletes),
37946 - atomic_read(&fscache_n_store_pages_over_limit));
37947 + atomic_read_unchecked(&fscache_n_store_ops),
37948 + atomic_read_unchecked(&fscache_n_store_calls),
37949 + atomic_read_unchecked(&fscache_n_store_pages),
37950 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37951 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37952
37953 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37954 - atomic_read(&fscache_n_store_vmscan_not_storing),
37955 - atomic_read(&fscache_n_store_vmscan_gone),
37956 - atomic_read(&fscache_n_store_vmscan_busy),
37957 - atomic_read(&fscache_n_store_vmscan_cancelled));
37958 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37959 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37960 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37961 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37962
37963 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37964 - atomic_read(&fscache_n_op_pend),
37965 - atomic_read(&fscache_n_op_run),
37966 - atomic_read(&fscache_n_op_enqueue),
37967 - atomic_read(&fscache_n_op_cancelled),
37968 - atomic_read(&fscache_n_op_rejected));
37969 + atomic_read_unchecked(&fscache_n_op_pend),
37970 + atomic_read_unchecked(&fscache_n_op_run),
37971 + atomic_read_unchecked(&fscache_n_op_enqueue),
37972 + atomic_read_unchecked(&fscache_n_op_cancelled),
37973 + atomic_read_unchecked(&fscache_n_op_rejected));
37974 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37975 - atomic_read(&fscache_n_op_deferred_release),
37976 - atomic_read(&fscache_n_op_release),
37977 - atomic_read(&fscache_n_op_gc));
37978 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37979 + atomic_read_unchecked(&fscache_n_op_release),
37980 + atomic_read_unchecked(&fscache_n_op_gc));
37981
37982 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37983 atomic_read(&fscache_n_cop_alloc_object),
37984 diff -urNp linux-2.6.39.4/fs/fs_struct.c linux-2.6.39.4/fs/fs_struct.c
37985 --- linux-2.6.39.4/fs/fs_struct.c 2011-05-19 00:06:34.000000000 -0400
37986 +++ linux-2.6.39.4/fs/fs_struct.c 2011-08-05 19:44:37.000000000 -0400
37987 @@ -4,6 +4,7 @@
37988 #include <linux/path.h>
37989 #include <linux/slab.h>
37990 #include <linux/fs_struct.h>
37991 +#include <linux/grsecurity.h>
37992 #include "internal.h"
37993
37994 static inline void path_get_longterm(struct path *path)
37995 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37996 old_root = fs->root;
37997 fs->root = *path;
37998 path_get_longterm(path);
37999 + gr_set_chroot_entries(current, path);
38000 write_seqcount_end(&fs->seq);
38001 spin_unlock(&fs->lock);
38002 if (old_root.dentry)
38003 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
38004 && fs->root.mnt == old_root->mnt) {
38005 path_get_longterm(new_root);
38006 fs->root = *new_root;
38007 + gr_set_chroot_entries(p, new_root);
38008 count++;
38009 }
38010 if (fs->pwd.dentry == old_root->dentry
38011 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
38012 spin_lock(&fs->lock);
38013 write_seqcount_begin(&fs->seq);
38014 tsk->fs = NULL;
38015 - kill = !--fs->users;
38016 + gr_clear_chroot_entries(tsk);
38017 + kill = !atomic_dec_return(&fs->users);
38018 write_seqcount_end(&fs->seq);
38019 spin_unlock(&fs->lock);
38020 task_unlock(tsk);
38021 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
38022 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
38023 /* We don't need to lock fs - think why ;-) */
38024 if (fs) {
38025 - fs->users = 1;
38026 + atomic_set(&fs->users, 1);
38027 fs->in_exec = 0;
38028 spin_lock_init(&fs->lock);
38029 seqcount_init(&fs->seq);
38030 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
38031 spin_lock(&old->lock);
38032 fs->root = old->root;
38033 path_get_longterm(&fs->root);
38034 + /* instead of calling gr_set_chroot_entries here,
38035 + we call it from every caller of this function
38036 + */
38037 fs->pwd = old->pwd;
38038 path_get_longterm(&fs->pwd);
38039 spin_unlock(&old->lock);
38040 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
38041
38042 task_lock(current);
38043 spin_lock(&fs->lock);
38044 - kill = !--fs->users;
38045 + kill = !atomic_dec_return(&fs->users);
38046 current->fs = new_fs;
38047 + gr_set_chroot_entries(current, &new_fs->root);
38048 spin_unlock(&fs->lock);
38049 task_unlock(current);
38050
38051 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
38052
38053 /* to be mentioned only in INIT_TASK */
38054 struct fs_struct init_fs = {
38055 - .users = 1,
38056 + .users = ATOMIC_INIT(1),
38057 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
38058 .seq = SEQCNT_ZERO,
38059 .umask = 0022,
38060 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
38061 task_lock(current);
38062
38063 spin_lock(&init_fs.lock);
38064 - init_fs.users++;
38065 + atomic_inc(&init_fs.users);
38066 spin_unlock(&init_fs.lock);
38067
38068 spin_lock(&fs->lock);
38069 current->fs = &init_fs;
38070 - kill = !--fs->users;
38071 + gr_set_chroot_entries(current, &current->fs->root);
38072 + kill = !atomic_dec_return(&fs->users);
38073 spin_unlock(&fs->lock);
38074
38075 task_unlock(current);
38076 diff -urNp linux-2.6.39.4/fs/fuse/cuse.c linux-2.6.39.4/fs/fuse/cuse.c
38077 --- linux-2.6.39.4/fs/fuse/cuse.c 2011-05-19 00:06:34.000000000 -0400
38078 +++ linux-2.6.39.4/fs/fuse/cuse.c 2011-08-05 20:34:06.000000000 -0400
38079 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
38080 INIT_LIST_HEAD(&cuse_conntbl[i]);
38081
38082 /* inherit and extend fuse_dev_operations */
38083 - cuse_channel_fops = fuse_dev_operations;
38084 - cuse_channel_fops.owner = THIS_MODULE;
38085 - cuse_channel_fops.open = cuse_channel_open;
38086 - cuse_channel_fops.release = cuse_channel_release;
38087 + pax_open_kernel();
38088 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
38089 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
38090 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
38091 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
38092 + pax_close_kernel();
38093
38094 cuse_class = class_create(THIS_MODULE, "cuse");
38095 if (IS_ERR(cuse_class))
38096 diff -urNp linux-2.6.39.4/fs/fuse/dev.c linux-2.6.39.4/fs/fuse/dev.c
38097 --- linux-2.6.39.4/fs/fuse/dev.c 2011-05-19 00:06:34.000000000 -0400
38098 +++ linux-2.6.39.4/fs/fuse/dev.c 2011-08-05 20:34:06.000000000 -0400
38099 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
38100 ret = 0;
38101 pipe_lock(pipe);
38102
38103 - if (!pipe->readers) {
38104 + if (!atomic_read(&pipe->readers)) {
38105 send_sig(SIGPIPE, current, 0);
38106 if (!ret)
38107 ret = -EPIPE;
38108 diff -urNp linux-2.6.39.4/fs/fuse/dir.c linux-2.6.39.4/fs/fuse/dir.c
38109 --- linux-2.6.39.4/fs/fuse/dir.c 2011-05-19 00:06:34.000000000 -0400
38110 +++ linux-2.6.39.4/fs/fuse/dir.c 2011-08-05 19:44:37.000000000 -0400
38111 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
38112 return link;
38113 }
38114
38115 -static void free_link(char *link)
38116 +static void free_link(const char *link)
38117 {
38118 if (!IS_ERR(link))
38119 free_page((unsigned long) link);
38120 diff -urNp linux-2.6.39.4/fs/gfs2/ops_inode.c linux-2.6.39.4/fs/gfs2/ops_inode.c
38121 --- linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-05-19 00:06:34.000000000 -0400
38122 +++ linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-08-05 19:44:37.000000000 -0400
38123 @@ -740,6 +740,8 @@ static int gfs2_rename(struct inode *odi
38124 unsigned int x;
38125 int error;
38126
38127 + pax_track_stack();
38128 +
38129 if (ndentry->d_inode) {
38130 nip = GFS2_I(ndentry->d_inode);
38131 if (ip == nip)
38132 @@ -1019,7 +1021,7 @@ out:
38133
38134 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38135 {
38136 - char *s = nd_get_link(nd);
38137 + const char *s = nd_get_link(nd);
38138 if (!IS_ERR(s))
38139 kfree(s);
38140 }
38141 diff -urNp linux-2.6.39.4/fs/hfsplus/catalog.c linux-2.6.39.4/fs/hfsplus/catalog.c
38142 --- linux-2.6.39.4/fs/hfsplus/catalog.c 2011-05-19 00:06:34.000000000 -0400
38143 +++ linux-2.6.39.4/fs/hfsplus/catalog.c 2011-08-05 19:44:37.000000000 -0400
38144 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38145 int err;
38146 u16 type;
38147
38148 + pax_track_stack();
38149 +
38150 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38151 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38152 if (err)
38153 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38154 int entry_size;
38155 int err;
38156
38157 + pax_track_stack();
38158 +
38159 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38160 str->name, cnid, inode->i_nlink);
38161 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38162 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38163 int entry_size, type;
38164 int err = 0;
38165
38166 + pax_track_stack();
38167 +
38168 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38169 cnid, src_dir->i_ino, src_name->name,
38170 dst_dir->i_ino, dst_name->name);
38171 diff -urNp linux-2.6.39.4/fs/hfsplus/dir.c linux-2.6.39.4/fs/hfsplus/dir.c
38172 --- linux-2.6.39.4/fs/hfsplus/dir.c 2011-05-19 00:06:34.000000000 -0400
38173 +++ linux-2.6.39.4/fs/hfsplus/dir.c 2011-08-05 19:44:37.000000000 -0400
38174 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38175 struct hfsplus_readdir_data *rd;
38176 u16 type;
38177
38178 + pax_track_stack();
38179 +
38180 if (filp->f_pos >= inode->i_size)
38181 return 0;
38182
38183 diff -urNp linux-2.6.39.4/fs/hfsplus/inode.c linux-2.6.39.4/fs/hfsplus/inode.c
38184 --- linux-2.6.39.4/fs/hfsplus/inode.c 2011-05-19 00:06:34.000000000 -0400
38185 +++ linux-2.6.39.4/fs/hfsplus/inode.c 2011-08-05 19:44:37.000000000 -0400
38186 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38187 int res = 0;
38188 u16 type;
38189
38190 + pax_track_stack();
38191 +
38192 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38193
38194 HFSPLUS_I(inode)->linkid = 0;
38195 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38196 struct hfs_find_data fd;
38197 hfsplus_cat_entry entry;
38198
38199 + pax_track_stack();
38200 +
38201 if (HFSPLUS_IS_RSRC(inode))
38202 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38203
38204 diff -urNp linux-2.6.39.4/fs/hfsplus/ioctl.c linux-2.6.39.4/fs/hfsplus/ioctl.c
38205 --- linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-05-19 00:06:34.000000000 -0400
38206 +++ linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-08-05 19:44:37.000000000 -0400
38207 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38208 struct hfsplus_cat_file *file;
38209 int res;
38210
38211 + pax_track_stack();
38212 +
38213 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38214 return -EOPNOTSUPP;
38215
38216 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38217 struct hfsplus_cat_file *file;
38218 ssize_t res = 0;
38219
38220 + pax_track_stack();
38221 +
38222 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38223 return -EOPNOTSUPP;
38224
38225 diff -urNp linux-2.6.39.4/fs/hfsplus/super.c linux-2.6.39.4/fs/hfsplus/super.c
38226 --- linux-2.6.39.4/fs/hfsplus/super.c 2011-05-19 00:06:34.000000000 -0400
38227 +++ linux-2.6.39.4/fs/hfsplus/super.c 2011-08-05 19:44:37.000000000 -0400
38228 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38229 struct nls_table *nls = NULL;
38230 int err;
38231
38232 + pax_track_stack();
38233 +
38234 err = -EINVAL;
38235 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38236 if (!sbi)
38237 diff -urNp linux-2.6.39.4/fs/hugetlbfs/inode.c linux-2.6.39.4/fs/hugetlbfs/inode.c
38238 --- linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38239 +++ linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38240 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38241 .kill_sb = kill_litter_super,
38242 };
38243
38244 -static struct vfsmount *hugetlbfs_vfsmount;
38245 +struct vfsmount *hugetlbfs_vfsmount;
38246
38247 static int can_do_hugetlb_shm(void)
38248 {
38249 diff -urNp linux-2.6.39.4/fs/inode.c linux-2.6.39.4/fs/inode.c
38250 --- linux-2.6.39.4/fs/inode.c 2011-05-19 00:06:34.000000000 -0400
38251 +++ linux-2.6.39.4/fs/inode.c 2011-08-05 19:44:37.000000000 -0400
38252 @@ -862,8 +862,8 @@ unsigned int get_next_ino(void)
38253
38254 #ifdef CONFIG_SMP
38255 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38256 - static atomic_t shared_last_ino;
38257 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38258 + static atomic_unchecked_t shared_last_ino;
38259 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38260
38261 res = next - LAST_INO_BATCH;
38262 }
38263 diff -urNp linux-2.6.39.4/fs/jbd/checkpoint.c linux-2.6.39.4/fs/jbd/checkpoint.c
38264 --- linux-2.6.39.4/fs/jbd/checkpoint.c 2011-05-19 00:06:34.000000000 -0400
38265 +++ linux-2.6.39.4/fs/jbd/checkpoint.c 2011-08-05 19:44:37.000000000 -0400
38266 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38267 tid_t this_tid;
38268 int result;
38269
38270 + pax_track_stack();
38271 +
38272 jbd_debug(1, "Start checkpoint\n");
38273
38274 /*
38275 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rtime.c linux-2.6.39.4/fs/jffs2/compr_rtime.c
38276 --- linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-05-19 00:06:34.000000000 -0400
38277 +++ linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-08-05 19:44:37.000000000 -0400
38278 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38279 int outpos = 0;
38280 int pos=0;
38281
38282 + pax_track_stack();
38283 +
38284 memset(positions,0,sizeof(positions));
38285
38286 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38287 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38288 int outpos = 0;
38289 int pos=0;
38290
38291 + pax_track_stack();
38292 +
38293 memset(positions,0,sizeof(positions));
38294
38295 while (outpos<destlen) {
38296 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rubin.c linux-2.6.39.4/fs/jffs2/compr_rubin.c
38297 --- linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-05-19 00:06:34.000000000 -0400
38298 +++ linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-08-05 19:44:37.000000000 -0400
38299 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38300 int ret;
38301 uint32_t mysrclen, mydstlen;
38302
38303 + pax_track_stack();
38304 +
38305 mysrclen = *sourcelen;
38306 mydstlen = *dstlen - 8;
38307
38308 diff -urNp linux-2.6.39.4/fs/jffs2/erase.c linux-2.6.39.4/fs/jffs2/erase.c
38309 --- linux-2.6.39.4/fs/jffs2/erase.c 2011-05-19 00:06:34.000000000 -0400
38310 +++ linux-2.6.39.4/fs/jffs2/erase.c 2011-08-05 19:44:37.000000000 -0400
38311 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38312 struct jffs2_unknown_node marker = {
38313 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38314 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38315 - .totlen = cpu_to_je32(c->cleanmarker_size)
38316 + .totlen = cpu_to_je32(c->cleanmarker_size),
38317 + .hdr_crc = cpu_to_je32(0)
38318 };
38319
38320 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38321 diff -urNp linux-2.6.39.4/fs/jffs2/wbuf.c linux-2.6.39.4/fs/jffs2/wbuf.c
38322 --- linux-2.6.39.4/fs/jffs2/wbuf.c 2011-05-19 00:06:34.000000000 -0400
38323 +++ linux-2.6.39.4/fs/jffs2/wbuf.c 2011-08-05 19:44:37.000000000 -0400
38324 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38325 {
38326 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38327 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38328 - .totlen = constant_cpu_to_je32(8)
38329 + .totlen = constant_cpu_to_je32(8),
38330 + .hdr_crc = constant_cpu_to_je32(0)
38331 };
38332
38333 /*
38334 diff -urNp linux-2.6.39.4/fs/jffs2/xattr.c linux-2.6.39.4/fs/jffs2/xattr.c
38335 --- linux-2.6.39.4/fs/jffs2/xattr.c 2011-05-19 00:06:34.000000000 -0400
38336 +++ linux-2.6.39.4/fs/jffs2/xattr.c 2011-08-05 19:44:37.000000000 -0400
38337 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38338
38339 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38340
38341 + pax_track_stack();
38342 +
38343 /* Phase.1 : Merge same xref */
38344 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38345 xref_tmphash[i] = NULL;
38346 diff -urNp linux-2.6.39.4/fs/jfs/super.c linux-2.6.39.4/fs/jfs/super.c
38347 --- linux-2.6.39.4/fs/jfs/super.c 2011-05-19 00:06:34.000000000 -0400
38348 +++ linux-2.6.39.4/fs/jfs/super.c 2011-08-05 19:44:37.000000000 -0400
38349 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38350
38351 jfs_inode_cachep =
38352 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38353 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38354 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38355 init_once);
38356 if (jfs_inode_cachep == NULL)
38357 return -ENOMEM;
38358 diff -urNp linux-2.6.39.4/fs/Kconfig.binfmt linux-2.6.39.4/fs/Kconfig.binfmt
38359 --- linux-2.6.39.4/fs/Kconfig.binfmt 2011-05-19 00:06:34.000000000 -0400
38360 +++ linux-2.6.39.4/fs/Kconfig.binfmt 2011-08-05 19:44:37.000000000 -0400
38361 @@ -86,7 +86,7 @@ config HAVE_AOUT
38362
38363 config BINFMT_AOUT
38364 tristate "Kernel support for a.out and ECOFF binaries"
38365 - depends on HAVE_AOUT
38366 + depends on HAVE_AOUT && BROKEN
38367 ---help---
38368 A.out (Assembler.OUTput) is a set of formats for libraries and
38369 executables used in the earliest versions of UNIX. Linux used
38370 diff -urNp linux-2.6.39.4/fs/libfs.c linux-2.6.39.4/fs/libfs.c
38371 --- linux-2.6.39.4/fs/libfs.c 2011-05-19 00:06:34.000000000 -0400
38372 +++ linux-2.6.39.4/fs/libfs.c 2011-08-05 19:44:37.000000000 -0400
38373 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38374
38375 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38376 struct dentry *next;
38377 + char d_name[sizeof(next->d_iname)];
38378 + const unsigned char *name;
38379 +
38380 next = list_entry(p, struct dentry, d_u.d_child);
38381 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38382 if (!simple_positive(next)) {
38383 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38384
38385 spin_unlock(&next->d_lock);
38386 spin_unlock(&dentry->d_lock);
38387 - if (filldir(dirent, next->d_name.name,
38388 + name = next->d_name.name;
38389 + if (name == next->d_iname) {
38390 + memcpy(d_name, name, next->d_name.len);
38391 + name = d_name;
38392 + }
38393 + if (filldir(dirent, name,
38394 next->d_name.len, filp->f_pos,
38395 next->d_inode->i_ino,
38396 dt_type(next->d_inode)) < 0)
38397 diff -urNp linux-2.6.39.4/fs/lockd/clntproc.c linux-2.6.39.4/fs/lockd/clntproc.c
38398 --- linux-2.6.39.4/fs/lockd/clntproc.c 2011-07-09 09:18:51.000000000 -0400
38399 +++ linux-2.6.39.4/fs/lockd/clntproc.c 2011-08-05 19:44:37.000000000 -0400
38400 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38401 /*
38402 * Cookie counter for NLM requests
38403 */
38404 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38405 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38406
38407 void nlmclnt_next_cookie(struct nlm_cookie *c)
38408 {
38409 - u32 cookie = atomic_inc_return(&nlm_cookie);
38410 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38411
38412 memcpy(c->data, &cookie, 4);
38413 c->len=4;
38414 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38415 struct nlm_rqst reqst, *req;
38416 int status;
38417
38418 + pax_track_stack();
38419 +
38420 req = &reqst;
38421 memset(req, 0, sizeof(*req));
38422 locks_init_lock(&req->a_args.lock.fl);
38423 diff -urNp linux-2.6.39.4/fs/locks.c linux-2.6.39.4/fs/locks.c
38424 --- linux-2.6.39.4/fs/locks.c 2011-07-09 09:18:51.000000000 -0400
38425 +++ linux-2.6.39.4/fs/locks.c 2011-08-05 19:44:37.000000000 -0400
38426 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38427 return;
38428
38429 if (filp->f_op && filp->f_op->flock) {
38430 - struct file_lock fl = {
38431 + struct file_lock flock = {
38432 .fl_pid = current->tgid,
38433 .fl_file = filp,
38434 .fl_flags = FL_FLOCK,
38435 .fl_type = F_UNLCK,
38436 .fl_end = OFFSET_MAX,
38437 };
38438 - filp->f_op->flock(filp, F_SETLKW, &fl);
38439 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38440 - fl.fl_ops->fl_release_private(&fl);
38441 + filp->f_op->flock(filp, F_SETLKW, &flock);
38442 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38443 + flock.fl_ops->fl_release_private(&flock);
38444 }
38445
38446 lock_flocks();
38447 diff -urNp linux-2.6.39.4/fs/logfs/super.c linux-2.6.39.4/fs/logfs/super.c
38448 --- linux-2.6.39.4/fs/logfs/super.c 2011-05-19 00:06:34.000000000 -0400
38449 +++ linux-2.6.39.4/fs/logfs/super.c 2011-08-05 19:44:37.000000000 -0400
38450 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38451 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38452 int err, valid0, valid1;
38453
38454 + pax_track_stack();
38455 +
38456 /* read first superblock */
38457 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38458 if (err)
38459 diff -urNp linux-2.6.39.4/fs/namei.c linux-2.6.39.4/fs/namei.c
38460 --- linux-2.6.39.4/fs/namei.c 2011-08-05 21:11:51.000000000 -0400
38461 +++ linux-2.6.39.4/fs/namei.c 2011-08-05 21:12:20.000000000 -0400
38462 @@ -237,20 +237,30 @@ int generic_permission(struct inode *ino
38463 return ret;
38464
38465 /*
38466 - * Read/write DACs are always overridable.
38467 - * Executable DACs are overridable if at least one exec bit is set.
38468 + * Searching includes executable on directories, else just read.
38469 */
38470 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38471 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38472 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38473 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38474 +#ifdef CONFIG_GRKERNSEC
38475 + if (flags & IPERM_FLAG_RCU)
38476 + return -ECHILD;
38477 +#endif
38478 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38479 return 0;
38480 + }
38481
38482 /*
38483 - * Searching includes executable on directories, else just read.
38484 + * Read/write DACs are always overridable.
38485 + * Executable DACs are overridable if at least one exec bit is set.
38486 */
38487 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38488 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38489 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38490 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38491 +#ifdef CONFIG_GRKERNSEC
38492 + if (flags & IPERM_FLAG_RCU)
38493 + return -ECHILD;
38494 +#endif
38495 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38496 return 0;
38497 + }
38498
38499 return -EACCES;
38500 }
38501 @@ -626,6 +636,9 @@ static inline int handle_reval_path(stru
38502 struct dentry *dentry = nd->path.dentry;
38503 int status;
38504
38505 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38506 + return -ENOENT;
38507 +
38508 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38509 return 0;
38510
38511 @@ -671,9 +684,16 @@ static inline int exec_permission(struct
38512 if (ret == -ECHILD)
38513 return ret;
38514
38515 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38516 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38517 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38518 goto ok;
38519 + else {
38520 +#ifdef CONFIG_GRKERNSEC
38521 + if (flags & IPERM_FLAG_RCU)
38522 + return -ECHILD;
38523 +#endif
38524 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38525 + goto ok;
38526 + }
38527
38528 return ret;
38529 ok:
38530 @@ -781,11 +801,19 @@ follow_link(struct path *link, struct na
38531 return error;
38532 }
38533
38534 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38535 + dentry->d_inode, dentry, nd->path.mnt)) {
38536 + error = -EACCES;
38537 + *p = ERR_PTR(error); /* no ->put_link(), please */
38538 + path_put(&nd->path);
38539 + return error;
38540 + }
38541 +
38542 nd->last_type = LAST_BIND;
38543 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38544 error = PTR_ERR(*p);
38545 if (!IS_ERR(*p)) {
38546 - char *s = nd_get_link(nd);
38547 + const char *s = nd_get_link(nd);
38548 error = 0;
38549 if (s)
38550 error = __vfs_follow_link(nd, s);
38551 @@ -1702,6 +1730,9 @@ static int do_path_lookup(int dfd, const
38552 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38553
38554 if (likely(!retval)) {
38555 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38556 + return -ENOENT;
38557 +
38558 if (unlikely(!audit_dummy_context())) {
38559 if (nd->path.dentry && nd->inode)
38560 audit_inode(name, nd->path.dentry);
38561 @@ -2012,6 +2043,30 @@ int vfs_create(struct inode *dir, struct
38562 return error;
38563 }
38564
38565 +/*
38566 + * Note that while the flag value (low two bits) for sys_open means:
38567 + * 00 - read-only
38568 + * 01 - write-only
38569 + * 10 - read-write
38570 + * 11 - special
38571 + * it is changed into
38572 + * 00 - no permissions needed
38573 + * 01 - read-permission
38574 + * 10 - write-permission
38575 + * 11 - read-write
38576 + * for the internal routines (ie open_namei()/follow_link() etc)
38577 + * This is more logical, and also allows the 00 "no perm needed"
38578 + * to be used for symlinks (where the permissions are checked
38579 + * later).
38580 + *
38581 +*/
38582 +static inline int open_to_namei_flags(int flag)
38583 +{
38584 + if ((flag+1) & O_ACCMODE)
38585 + flag++;
38586 + return flag;
38587 +}
38588 +
38589 static int may_open(struct path *path, int acc_mode, int flag)
38590 {
38591 struct dentry *dentry = path->dentry;
38592 @@ -2064,7 +2119,27 @@ static int may_open(struct path *path, i
38593 /*
38594 * Ensure there are no outstanding leases on the file.
38595 */
38596 - return break_lease(inode, flag);
38597 + error = break_lease(inode, flag);
38598 +
38599 + if (error)
38600 + return error;
38601 +
38602 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38603 + error = -EPERM;
38604 + goto exit;
38605 + }
38606 +
38607 + if (gr_handle_rawio(inode)) {
38608 + error = -EPERM;
38609 + goto exit;
38610 + }
38611 +
38612 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38613 + error = -EACCES;
38614 + goto exit;
38615 + }
38616 +exit:
38617 + return error;
38618 }
38619
38620 static int handle_truncate(struct file *filp)
38621 @@ -2090,30 +2165,6 @@ static int handle_truncate(struct file *
38622 }
38623
38624 /*
38625 - * Note that while the flag value (low two bits) for sys_open means:
38626 - * 00 - read-only
38627 - * 01 - write-only
38628 - * 10 - read-write
38629 - * 11 - special
38630 - * it is changed into
38631 - * 00 - no permissions needed
38632 - * 01 - read-permission
38633 - * 10 - write-permission
38634 - * 11 - read-write
38635 - * for the internal routines (ie open_namei()/follow_link() etc)
38636 - * This is more logical, and also allows the 00 "no perm needed"
38637 - * to be used for symlinks (where the permissions are checked
38638 - * later).
38639 - *
38640 -*/
38641 -static inline int open_to_namei_flags(int flag)
38642 -{
38643 - if ((flag+1) & O_ACCMODE)
38644 - flag++;
38645 - return flag;
38646 -}
38647 -
38648 -/*
38649 * Handle the last step of open()
38650 */
38651 static struct file *do_last(struct nameidata *nd, struct path *path,
38652 @@ -2122,6 +2173,7 @@ static struct file *do_last(struct namei
38653 struct dentry *dir = nd->path.dentry;
38654 struct dentry *dentry;
38655 int open_flag = op->open_flag;
38656 + int flag = open_to_namei_flags(open_flag);
38657 int will_truncate = open_flag & O_TRUNC;
38658 int want_write = 0;
38659 int acc_mode = op->acc_mode;
38660 @@ -2217,6 +2269,12 @@ static struct file *do_last(struct namei
38661 /* Negative dentry, just create the file */
38662 if (!dentry->d_inode) {
38663 int mode = op->mode;
38664 +
38665 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38666 + error = -EACCES;
38667 + goto exit_mutex_unlock;
38668 + }
38669 +
38670 if (!IS_POSIXACL(dir->d_inode))
38671 mode &= ~current_umask();
38672 /*
38673 @@ -2240,6 +2298,8 @@ static struct file *do_last(struct namei
38674 error = vfs_create(dir->d_inode, dentry, mode, nd);
38675 if (error)
38676 goto exit_mutex_unlock;
38677 + else
38678 + gr_handle_create(path->dentry, path->mnt);
38679 mutex_unlock(&dir->d_inode->i_mutex);
38680 dput(nd->path.dentry);
38681 nd->path.dentry = dentry;
38682 @@ -2249,6 +2309,14 @@ static struct file *do_last(struct namei
38683 /*
38684 * It already exists.
38685 */
38686 +
38687 + /* only check if O_CREAT is specified, all other checks need to go
38688 + into may_open */
38689 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38690 + error = -EACCES;
38691 + goto exit_mutex_unlock;
38692 + }
38693 +
38694 mutex_unlock(&dir->d_inode->i_mutex);
38695 audit_inode(pathname, path->dentry);
38696
38697 @@ -2535,6 +2603,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38698 error = may_mknod(mode);
38699 if (error)
38700 goto out_dput;
38701 +
38702 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38703 + error = -EPERM;
38704 + goto out_dput;
38705 + }
38706 +
38707 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38708 + error = -EACCES;
38709 + goto out_dput;
38710 + }
38711 +
38712 error = mnt_want_write(nd.path.mnt);
38713 if (error)
38714 goto out_dput;
38715 @@ -2555,6 +2634,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38716 }
38717 out_drop_write:
38718 mnt_drop_write(nd.path.mnt);
38719 +
38720 + if (!error)
38721 + gr_handle_create(dentry, nd.path.mnt);
38722 out_dput:
38723 dput(dentry);
38724 out_unlock:
38725 @@ -2607,6 +2689,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38726 if (IS_ERR(dentry))
38727 goto out_unlock;
38728
38729 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38730 + error = -EACCES;
38731 + goto out_dput;
38732 + }
38733 +
38734 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38735 mode &= ~current_umask();
38736 error = mnt_want_write(nd.path.mnt);
38737 @@ -2618,6 +2705,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38738 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38739 out_drop_write:
38740 mnt_drop_write(nd.path.mnt);
38741 +
38742 + if (!error)
38743 + gr_handle_create(dentry, nd.path.mnt);
38744 +
38745 out_dput:
38746 dput(dentry);
38747 out_unlock:
38748 @@ -2697,6 +2788,8 @@ static long do_rmdir(int dfd, const char
38749 char * name;
38750 struct dentry *dentry;
38751 struct nameidata nd;
38752 + ino_t saved_ino = 0;
38753 + dev_t saved_dev = 0;
38754
38755 error = user_path_parent(dfd, pathname, &nd, &name);
38756 if (error)
38757 @@ -2721,6 +2814,19 @@ static long do_rmdir(int dfd, const char
38758 error = PTR_ERR(dentry);
38759 if (IS_ERR(dentry))
38760 goto exit2;
38761 +
38762 + if (dentry->d_inode != NULL) {
38763 + if (dentry->d_inode->i_nlink <= 1) {
38764 + saved_ino = dentry->d_inode->i_ino;
38765 + saved_dev = gr_get_dev_from_dentry(dentry);
38766 + }
38767 +
38768 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38769 + error = -EACCES;
38770 + goto exit3;
38771 + }
38772 + }
38773 +
38774 error = mnt_want_write(nd.path.mnt);
38775 if (error)
38776 goto exit3;
38777 @@ -2728,6 +2834,8 @@ static long do_rmdir(int dfd, const char
38778 if (error)
38779 goto exit4;
38780 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38781 + if (!error && (saved_dev || saved_ino))
38782 + gr_handle_delete(saved_ino, saved_dev);
38783 exit4:
38784 mnt_drop_write(nd.path.mnt);
38785 exit3:
38786 @@ -2790,6 +2898,8 @@ static long do_unlinkat(int dfd, const c
38787 struct dentry *dentry;
38788 struct nameidata nd;
38789 struct inode *inode = NULL;
38790 + ino_t saved_ino = 0;
38791 + dev_t saved_dev = 0;
38792
38793 error = user_path_parent(dfd, pathname, &nd, &name);
38794 if (error)
38795 @@ -2809,8 +2919,17 @@ static long do_unlinkat(int dfd, const c
38796 if (nd.last.name[nd.last.len])
38797 goto slashes;
38798 inode = dentry->d_inode;
38799 - if (inode)
38800 + if (inode) {
38801 ihold(inode);
38802 + if (inode->i_nlink <= 1) {
38803 + saved_ino = inode->i_ino;
38804 + saved_dev = gr_get_dev_from_dentry(dentry);
38805 + }
38806 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38807 + error = -EACCES;
38808 + goto exit2;
38809 + }
38810 + }
38811 error = mnt_want_write(nd.path.mnt);
38812 if (error)
38813 goto exit2;
38814 @@ -2818,6 +2937,8 @@ static long do_unlinkat(int dfd, const c
38815 if (error)
38816 goto exit3;
38817 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38818 + if (!error && (saved_ino || saved_dev))
38819 + gr_handle_delete(saved_ino, saved_dev);
38820 exit3:
38821 mnt_drop_write(nd.path.mnt);
38822 exit2:
38823 @@ -2895,6 +3016,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38824 if (IS_ERR(dentry))
38825 goto out_unlock;
38826
38827 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38828 + error = -EACCES;
38829 + goto out_dput;
38830 + }
38831 +
38832 error = mnt_want_write(nd.path.mnt);
38833 if (error)
38834 goto out_dput;
38835 @@ -2902,6 +3028,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38836 if (error)
38837 goto out_drop_write;
38838 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38839 + if (!error)
38840 + gr_handle_create(dentry, nd.path.mnt);
38841 out_drop_write:
38842 mnt_drop_write(nd.path.mnt);
38843 out_dput:
38844 @@ -3010,6 +3138,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38845 error = PTR_ERR(new_dentry);
38846 if (IS_ERR(new_dentry))
38847 goto out_unlock;
38848 +
38849 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38850 + old_path.dentry->d_inode,
38851 + old_path.dentry->d_inode->i_mode, to)) {
38852 + error = -EACCES;
38853 + goto out_dput;
38854 + }
38855 +
38856 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38857 + old_path.dentry, old_path.mnt, to)) {
38858 + error = -EACCES;
38859 + goto out_dput;
38860 + }
38861 +
38862 error = mnt_want_write(nd.path.mnt);
38863 if (error)
38864 goto out_dput;
38865 @@ -3017,6 +3159,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38866 if (error)
38867 goto out_drop_write;
38868 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38869 + if (!error)
38870 + gr_handle_create(new_dentry, nd.path.mnt);
38871 out_drop_write:
38872 mnt_drop_write(nd.path.mnt);
38873 out_dput:
38874 @@ -3194,6 +3338,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38875 char *to;
38876 int error;
38877
38878 + pax_track_stack();
38879 +
38880 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38881 if (error)
38882 goto exit;
38883 @@ -3250,6 +3396,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38884 if (new_dentry == trap)
38885 goto exit5;
38886
38887 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38888 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38889 + to);
38890 + if (error)
38891 + goto exit5;
38892 +
38893 error = mnt_want_write(oldnd.path.mnt);
38894 if (error)
38895 goto exit5;
38896 @@ -3259,6 +3411,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38897 goto exit6;
38898 error = vfs_rename(old_dir->d_inode, old_dentry,
38899 new_dir->d_inode, new_dentry);
38900 + if (!error)
38901 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38902 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38903 exit6:
38904 mnt_drop_write(oldnd.path.mnt);
38905 exit5:
38906 @@ -3284,6 +3439,8 @@ SYSCALL_DEFINE2(rename, const char __use
38907
38908 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38909 {
38910 + char tmpbuf[64];
38911 + const char *newlink;
38912 int len;
38913
38914 len = PTR_ERR(link);
38915 @@ -3293,7 +3450,14 @@ int vfs_readlink(struct dentry *dentry,
38916 len = strlen(link);
38917 if (len > (unsigned) buflen)
38918 len = buflen;
38919 - if (copy_to_user(buffer, link, len))
38920 +
38921 + if (len < sizeof(tmpbuf)) {
38922 + memcpy(tmpbuf, link, len);
38923 + newlink = tmpbuf;
38924 + } else
38925 + newlink = link;
38926 +
38927 + if (copy_to_user(buffer, newlink, len))
38928 len = -EFAULT;
38929 out:
38930 return len;
38931 diff -urNp linux-2.6.39.4/fs/namespace.c linux-2.6.39.4/fs/namespace.c
38932 --- linux-2.6.39.4/fs/namespace.c 2011-05-19 00:06:34.000000000 -0400
38933 +++ linux-2.6.39.4/fs/namespace.c 2011-08-05 19:44:37.000000000 -0400
38934 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38935 if (!(sb->s_flags & MS_RDONLY))
38936 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38937 up_write(&sb->s_umount);
38938 +
38939 + gr_log_remount(mnt->mnt_devname, retval);
38940 +
38941 return retval;
38942 }
38943
38944 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38945 br_write_unlock(vfsmount_lock);
38946 up_write(&namespace_sem);
38947 release_mounts(&umount_list);
38948 +
38949 + gr_log_unmount(mnt->mnt_devname, retval);
38950 +
38951 return retval;
38952 }
38953
38954 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38955 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38956 MS_STRICTATIME);
38957
38958 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38959 + retval = -EPERM;
38960 + goto dput_out;
38961 + }
38962 +
38963 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38964 + retval = -EPERM;
38965 + goto dput_out;
38966 + }
38967 +
38968 if (flags & MS_REMOUNT)
38969 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38970 data_page);
38971 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38972 dev_name, data_page);
38973 dput_out:
38974 path_put(&path);
38975 +
38976 + gr_log_mount(dev_name, dir_name, retval);
38977 +
38978 return retval;
38979 }
38980
38981 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38982 if (error)
38983 goto out2;
38984
38985 + if (gr_handle_chroot_pivot()) {
38986 + error = -EPERM;
38987 + goto out2;
38988 + }
38989 +
38990 get_fs_root(current->fs, &root);
38991 error = lock_mount(&old);
38992 if (error)
38993 diff -urNp linux-2.6.39.4/fs/ncpfs/dir.c linux-2.6.39.4/fs/ncpfs/dir.c
38994 --- linux-2.6.39.4/fs/ncpfs/dir.c 2011-05-19 00:06:34.000000000 -0400
38995 +++ linux-2.6.39.4/fs/ncpfs/dir.c 2011-08-05 19:44:37.000000000 -0400
38996 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38997 int res, val = 0, len;
38998 __u8 __name[NCP_MAXPATHLEN + 1];
38999
39000 + pax_track_stack();
39001 +
39002 if (dentry == dentry->d_sb->s_root)
39003 return 1;
39004
39005 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
39006 int error, res, len;
39007 __u8 __name[NCP_MAXPATHLEN + 1];
39008
39009 + pax_track_stack();
39010 +
39011 error = -EIO;
39012 if (!ncp_conn_valid(server))
39013 goto finished;
39014 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
39015 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
39016 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
39017
39018 + pax_track_stack();
39019 +
39020 ncp_age_dentry(server, dentry);
39021 len = sizeof(__name);
39022 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
39023 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
39024 int error, len;
39025 __u8 __name[NCP_MAXPATHLEN + 1];
39026
39027 + pax_track_stack();
39028 +
39029 DPRINTK("ncp_mkdir: making %s/%s\n",
39030 dentry->d_parent->d_name.name, dentry->d_name.name);
39031
39032 @@ -1135,6 +1143,8 @@ static int ncp_rename(struct inode *old_
39033 int old_len, new_len;
39034 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
39035
39036 + pax_track_stack();
39037 +
39038 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
39039 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
39040 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
39041 diff -urNp linux-2.6.39.4/fs/ncpfs/inode.c linux-2.6.39.4/fs/ncpfs/inode.c
39042 --- linux-2.6.39.4/fs/ncpfs/inode.c 2011-05-19 00:06:34.000000000 -0400
39043 +++ linux-2.6.39.4/fs/ncpfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39044 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
39045 #endif
39046 struct ncp_entry_info finfo;
39047
39048 + pax_track_stack();
39049 +
39050 data.wdog_pid = NULL;
39051 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
39052 if (!server)
39053 diff -urNp linux-2.6.39.4/fs/nfs/inode.c linux-2.6.39.4/fs/nfs/inode.c
39054 --- linux-2.6.39.4/fs/nfs/inode.c 2011-07-09 09:18:51.000000000 -0400
39055 +++ linux-2.6.39.4/fs/nfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39056 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
39057 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
39058 nfsi->attrtimeo_timestamp = jiffies;
39059
39060 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
39061 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
39062 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
39063 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
39064 else
39065 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
39066 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
39067 }
39068
39069 -static atomic_long_t nfs_attr_generation_counter;
39070 +static atomic_long_unchecked_t nfs_attr_generation_counter;
39071
39072 static unsigned long nfs_read_attr_generation_counter(void)
39073 {
39074 - return atomic_long_read(&nfs_attr_generation_counter);
39075 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
39076 }
39077
39078 unsigned long nfs_inc_attr_generation_counter(void)
39079 {
39080 - return atomic_long_inc_return(&nfs_attr_generation_counter);
39081 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
39082 }
39083
39084 void nfs_fattr_init(struct nfs_fattr *fattr)
39085 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4state.c linux-2.6.39.4/fs/nfsd/nfs4state.c
39086 --- linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-05-19 00:06:34.000000000 -0400
39087 +++ linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-08-05 19:44:37.000000000 -0400
39088 @@ -3784,6 +3784,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
39089 unsigned int strhashval;
39090 int err;
39091
39092 + pax_track_stack();
39093 +
39094 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
39095 (long long) lock->lk_offset,
39096 (long long) lock->lk_length);
39097 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4xdr.c linux-2.6.39.4/fs/nfsd/nfs4xdr.c
39098 --- linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-05-19 00:06:34.000000000 -0400
39099 +++ linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-08-05 19:44:37.000000000 -0400
39100 @@ -1793,6 +1793,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
39101 .dentry = dentry,
39102 };
39103
39104 + pax_track_stack();
39105 +
39106 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
39107 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
39108 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
39109 diff -urNp linux-2.6.39.4/fs/nfsd/vfs.c linux-2.6.39.4/fs/nfsd/vfs.c
39110 --- linux-2.6.39.4/fs/nfsd/vfs.c 2011-07-09 09:18:51.000000000 -0400
39111 +++ linux-2.6.39.4/fs/nfsd/vfs.c 2011-08-05 19:44:37.000000000 -0400
39112 @@ -901,7 +901,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
39113 } else {
39114 oldfs = get_fs();
39115 set_fs(KERNEL_DS);
39116 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
39117 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
39118 set_fs(oldfs);
39119 }
39120
39121 @@ -1005,7 +1005,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
39122
39123 /* Write the data. */
39124 oldfs = get_fs(); set_fs(KERNEL_DS);
39125 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
39126 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
39127 set_fs(oldfs);
39128 if (host_err < 0)
39129 goto out_nfserr;
39130 @@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39131 */
39132
39133 oldfs = get_fs(); set_fs(KERNEL_DS);
39134 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
39135 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39136 set_fs(oldfs);
39137
39138 if (host_err < 0)
39139 diff -urNp linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c
39140 --- linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-05-19 00:06:34.000000000 -0400
39141 +++ linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-08-14 11:28:46.000000000 -0400
39142 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39143 goto out_close_fd;
39144
39145 ret = -EFAULT;
39146 - if (copy_to_user(buf, &fanotify_event_metadata,
39147 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39148 + copy_to_user(buf, &fanotify_event_metadata,
39149 fanotify_event_metadata.event_len))
39150 goto out_kill_access_response;
39151
39152 diff -urNp linux-2.6.39.4/fs/notify/notification.c linux-2.6.39.4/fs/notify/notification.c
39153 --- linux-2.6.39.4/fs/notify/notification.c 2011-05-19 00:06:34.000000000 -0400
39154 +++ linux-2.6.39.4/fs/notify/notification.c 2011-08-05 19:44:37.000000000 -0400
39155 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39156 * get set to 0 so it will never get 'freed'
39157 */
39158 static struct fsnotify_event *q_overflow_event;
39159 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39160 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39161
39162 /**
39163 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39164 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39165 */
39166 u32 fsnotify_get_cookie(void)
39167 {
39168 - return atomic_inc_return(&fsnotify_sync_cookie);
39169 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39170 }
39171 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39172
39173 diff -urNp linux-2.6.39.4/fs/ntfs/dir.c linux-2.6.39.4/fs/ntfs/dir.c
39174 --- linux-2.6.39.4/fs/ntfs/dir.c 2011-05-19 00:06:34.000000000 -0400
39175 +++ linux-2.6.39.4/fs/ntfs/dir.c 2011-08-05 19:44:37.000000000 -0400
39176 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
39177 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39178 ~(s64)(ndir->itype.index.block_size - 1)));
39179 /* Bounds checks. */
39180 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39181 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39182 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39183 "inode 0x%lx or driver bug.", vdir->i_ino);
39184 goto err_out;
39185 diff -urNp linux-2.6.39.4/fs/ntfs/file.c linux-2.6.39.4/fs/ntfs/file.c
39186 --- linux-2.6.39.4/fs/ntfs/file.c 2011-05-19 00:06:34.000000000 -0400
39187 +++ linux-2.6.39.4/fs/ntfs/file.c 2011-08-05 19:44:37.000000000 -0400
39188 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39189 #endif /* NTFS_RW */
39190 };
39191
39192 -const struct file_operations ntfs_empty_file_ops = {};
39193 +const struct file_operations ntfs_empty_file_ops __read_only;
39194
39195 -const struct inode_operations ntfs_empty_inode_ops = {};
39196 +const struct inode_operations ntfs_empty_inode_ops __read_only;
39197 diff -urNp linux-2.6.39.4/fs/ocfs2/localalloc.c linux-2.6.39.4/fs/ocfs2/localalloc.c
39198 --- linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-05-19 00:06:34.000000000 -0400
39199 +++ linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-08-05 19:44:37.000000000 -0400
39200 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39201 goto bail;
39202 }
39203
39204 - atomic_inc(&osb->alloc_stats.moves);
39205 + atomic_inc_unchecked(&osb->alloc_stats.moves);
39206
39207 bail:
39208 if (handle)
39209 diff -urNp linux-2.6.39.4/fs/ocfs2/namei.c linux-2.6.39.4/fs/ocfs2/namei.c
39210 --- linux-2.6.39.4/fs/ocfs2/namei.c 2011-05-19 00:06:34.000000000 -0400
39211 +++ linux-2.6.39.4/fs/ocfs2/namei.c 2011-08-05 19:44:37.000000000 -0400
39212 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39213 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39214 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39215
39216 + pax_track_stack();
39217 +
39218 /* At some point it might be nice to break this function up a
39219 * bit. */
39220
39221 diff -urNp linux-2.6.39.4/fs/ocfs2/ocfs2.h linux-2.6.39.4/fs/ocfs2/ocfs2.h
39222 --- linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-05-19 00:06:34.000000000 -0400
39223 +++ linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-08-05 19:44:37.000000000 -0400
39224 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
39225
39226 struct ocfs2_alloc_stats
39227 {
39228 - atomic_t moves;
39229 - atomic_t local_data;
39230 - atomic_t bitmap_data;
39231 - atomic_t bg_allocs;
39232 - atomic_t bg_extends;
39233 + atomic_unchecked_t moves;
39234 + atomic_unchecked_t local_data;
39235 + atomic_unchecked_t bitmap_data;
39236 + atomic_unchecked_t bg_allocs;
39237 + atomic_unchecked_t bg_extends;
39238 };
39239
39240 enum ocfs2_local_alloc_state
39241 diff -urNp linux-2.6.39.4/fs/ocfs2/suballoc.c linux-2.6.39.4/fs/ocfs2/suballoc.c
39242 --- linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-05-19 00:06:34.000000000 -0400
39243 +++ linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-08-05 19:44:37.000000000 -0400
39244 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39245 mlog_errno(status);
39246 goto bail;
39247 }
39248 - atomic_inc(&osb->alloc_stats.bg_extends);
39249 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39250
39251 /* You should never ask for this much metadata */
39252 BUG_ON(bits_wanted >
39253 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39254 mlog_errno(status);
39255 goto bail;
39256 }
39257 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39258 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39259
39260 *suballoc_loc = res.sr_bg_blkno;
39261 *suballoc_bit_start = res.sr_bit_offset;
39262 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39263 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39264 res->sr_bits);
39265
39266 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39267 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39268
39269 BUG_ON(res->sr_bits != 1);
39270
39271 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39272 mlog_errno(status);
39273 goto bail;
39274 }
39275 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39276 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39277
39278 BUG_ON(res.sr_bits != 1);
39279
39280 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39281 cluster_start,
39282 num_clusters);
39283 if (!status)
39284 - atomic_inc(&osb->alloc_stats.local_data);
39285 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
39286 } else {
39287 if (min_clusters > (osb->bitmap_cpg - 1)) {
39288 /* The only paths asking for contiguousness
39289 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39290 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39291 res.sr_bg_blkno,
39292 res.sr_bit_offset);
39293 - atomic_inc(&osb->alloc_stats.bitmap_data);
39294 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39295 *num_clusters = res.sr_bits;
39296 }
39297 }
39298 diff -urNp linux-2.6.39.4/fs/ocfs2/super.c linux-2.6.39.4/fs/ocfs2/super.c
39299 --- linux-2.6.39.4/fs/ocfs2/super.c 2011-05-19 00:06:34.000000000 -0400
39300 +++ linux-2.6.39.4/fs/ocfs2/super.c 2011-08-05 19:44:37.000000000 -0400
39301 @@ -299,11 +299,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39302 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39303 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39304 "Stats",
39305 - atomic_read(&osb->alloc_stats.bitmap_data),
39306 - atomic_read(&osb->alloc_stats.local_data),
39307 - atomic_read(&osb->alloc_stats.bg_allocs),
39308 - atomic_read(&osb->alloc_stats.moves),
39309 - atomic_read(&osb->alloc_stats.bg_extends));
39310 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39311 + atomic_read_unchecked(&osb->alloc_stats.local_data),
39312 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39313 + atomic_read_unchecked(&osb->alloc_stats.moves),
39314 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39315
39316 out += snprintf(buf + out, len - out,
39317 "%10s => State: %u Descriptor: %llu Size: %u bits "
39318 @@ -2111,11 +2111,11 @@ static int ocfs2_initialize_super(struct
39319 spin_lock_init(&osb->osb_xattr_lock);
39320 ocfs2_init_steal_slots(osb);
39321
39322 - atomic_set(&osb->alloc_stats.moves, 0);
39323 - atomic_set(&osb->alloc_stats.local_data, 0);
39324 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
39325 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
39326 - atomic_set(&osb->alloc_stats.bg_extends, 0);
39327 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39328 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39329 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39330 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39331 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39332
39333 /* Copy the blockcheck stats from the superblock probe */
39334 osb->osb_ecc_stats = *stats;
39335 diff -urNp linux-2.6.39.4/fs/ocfs2/symlink.c linux-2.6.39.4/fs/ocfs2/symlink.c
39336 --- linux-2.6.39.4/fs/ocfs2/symlink.c 2011-05-19 00:06:34.000000000 -0400
39337 +++ linux-2.6.39.4/fs/ocfs2/symlink.c 2011-08-05 19:44:37.000000000 -0400
39338 @@ -142,7 +142,7 @@ bail:
39339
39340 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39341 {
39342 - char *link = nd_get_link(nd);
39343 + const char *link = nd_get_link(nd);
39344 if (!IS_ERR(link))
39345 kfree(link);
39346 }
39347 diff -urNp linux-2.6.39.4/fs/open.c linux-2.6.39.4/fs/open.c
39348 --- linux-2.6.39.4/fs/open.c 2011-05-19 00:06:34.000000000 -0400
39349 +++ linux-2.6.39.4/fs/open.c 2011-08-05 19:44:37.000000000 -0400
39350 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39351 error = locks_verify_truncate(inode, NULL, length);
39352 if (!error)
39353 error = security_path_truncate(&path);
39354 +
39355 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39356 + error = -EACCES;
39357 +
39358 if (!error)
39359 error = do_truncate(path.dentry, length, 0, NULL);
39360
39361 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39362 if (__mnt_is_readonly(path.mnt))
39363 res = -EROFS;
39364
39365 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39366 + res = -EACCES;
39367 +
39368 out_path_release:
39369 path_put(&path);
39370 out:
39371 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39372 if (error)
39373 goto dput_and_out;
39374
39375 + gr_log_chdir(path.dentry, path.mnt);
39376 +
39377 set_fs_pwd(current->fs, &path);
39378
39379 dput_and_out:
39380 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39381 goto out_putf;
39382
39383 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39384 +
39385 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39386 + error = -EPERM;
39387 +
39388 + if (!error)
39389 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39390 +
39391 if (!error)
39392 set_fs_pwd(current->fs, &file->f_path);
39393 out_putf:
39394 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39395 if (error)
39396 goto dput_and_out;
39397
39398 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39399 + goto dput_and_out;
39400 +
39401 + if (gr_handle_chroot_caps(&path)) {
39402 + error = -ENOMEM;
39403 + goto dput_and_out;
39404 + }
39405 +
39406 set_fs_root(current->fs, &path);
39407 +
39408 + gr_handle_chroot_chdir(&path);
39409 +
39410 error = 0;
39411 dput_and_out:
39412 path_put(&path);
39413 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39414 err = mnt_want_write_file(file);
39415 if (err)
39416 goto out_putf;
39417 +
39418 mutex_lock(&inode->i_mutex);
39419 +
39420 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39421 + err = -EACCES;
39422 + goto out_unlock;
39423 + }
39424 +
39425 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39426 if (err)
39427 goto out_unlock;
39428 if (mode == (mode_t) -1)
39429 mode = inode->i_mode;
39430 +
39431 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39432 + err = -EACCES;
39433 + goto out_unlock;
39434 + }
39435 +
39436 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39437 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39438 err = notify_change(dentry, &newattrs);
39439 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39440 error = mnt_want_write(path.mnt);
39441 if (error)
39442 goto dput_and_out;
39443 +
39444 mutex_lock(&inode->i_mutex);
39445 +
39446 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39447 + error = -EACCES;
39448 + goto out_unlock;
39449 + }
39450 +
39451 error = security_path_chmod(path.dentry, path.mnt, mode);
39452 if (error)
39453 goto out_unlock;
39454 if (mode == (mode_t) -1)
39455 mode = inode->i_mode;
39456 +
39457 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39458 + error = -EACCES;
39459 + goto out_unlock;
39460 + }
39461 +
39462 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39463 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39464 error = notify_change(path.dentry, &newattrs);
39465 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39466 int error;
39467 struct iattr newattrs;
39468
39469 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39470 + return -EACCES;
39471 +
39472 newattrs.ia_valid = ATTR_CTIME;
39473 if (user != (uid_t) -1) {
39474 newattrs.ia_valid |= ATTR_UID;
39475 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39476 if (!IS_ERR(tmp)) {
39477 fd = get_unused_fd_flags(flags);
39478 if (fd >= 0) {
39479 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39480 + struct file *f;
39481 + /* don't allow to be set by userland */
39482 + flags &= ~FMODE_GREXEC;
39483 + f = do_filp_open(dfd, tmp, &op, lookup);
39484 if (IS_ERR(f)) {
39485 put_unused_fd(fd);
39486 fd = PTR_ERR(f);
39487 diff -urNp linux-2.6.39.4/fs/partitions/ldm.c linux-2.6.39.4/fs/partitions/ldm.c
39488 --- linux-2.6.39.4/fs/partitions/ldm.c 2011-06-03 00:04:14.000000000 -0400
39489 +++ linux-2.6.39.4/fs/partitions/ldm.c 2011-08-05 19:44:37.000000000 -0400
39490 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39491 ldm_error ("A VBLK claims to have %d parts.", num);
39492 return false;
39493 }
39494 +
39495 if (rec >= num) {
39496 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39497 return false;
39498 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39499 goto found;
39500 }
39501
39502 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39503 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39504 if (!f) {
39505 ldm_crit ("Out of memory.");
39506 return false;
39507 diff -urNp linux-2.6.39.4/fs/pipe.c linux-2.6.39.4/fs/pipe.c
39508 --- linux-2.6.39.4/fs/pipe.c 2011-05-19 00:06:34.000000000 -0400
39509 +++ linux-2.6.39.4/fs/pipe.c 2011-08-05 19:44:37.000000000 -0400
39510 @@ -420,9 +420,9 @@ redo:
39511 }
39512 if (bufs) /* More to do? */
39513 continue;
39514 - if (!pipe->writers)
39515 + if (!atomic_read(&pipe->writers))
39516 break;
39517 - if (!pipe->waiting_writers) {
39518 + if (!atomic_read(&pipe->waiting_writers)) {
39519 /* syscall merging: Usually we must not sleep
39520 * if O_NONBLOCK is set, or if we got some data.
39521 * But if a writer sleeps in kernel space, then
39522 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39523 mutex_lock(&inode->i_mutex);
39524 pipe = inode->i_pipe;
39525
39526 - if (!pipe->readers) {
39527 + if (!atomic_read(&pipe->readers)) {
39528 send_sig(SIGPIPE, current, 0);
39529 ret = -EPIPE;
39530 goto out;
39531 @@ -530,7 +530,7 @@ redo1:
39532 for (;;) {
39533 int bufs;
39534
39535 - if (!pipe->readers) {
39536 + if (!atomic_read(&pipe->readers)) {
39537 send_sig(SIGPIPE, current, 0);
39538 if (!ret)
39539 ret = -EPIPE;
39540 @@ -616,9 +616,9 @@ redo2:
39541 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39542 do_wakeup = 0;
39543 }
39544 - pipe->waiting_writers++;
39545 + atomic_inc(&pipe->waiting_writers);
39546 pipe_wait(pipe);
39547 - pipe->waiting_writers--;
39548 + atomic_dec(&pipe->waiting_writers);
39549 }
39550 out:
39551 mutex_unlock(&inode->i_mutex);
39552 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39553 mask = 0;
39554 if (filp->f_mode & FMODE_READ) {
39555 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39556 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39557 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39558 mask |= POLLHUP;
39559 }
39560
39561 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39562 * Most Unices do not set POLLERR for FIFOs but on Linux they
39563 * behave exactly like pipes for poll().
39564 */
39565 - if (!pipe->readers)
39566 + if (!atomic_read(&pipe->readers))
39567 mask |= POLLERR;
39568 }
39569
39570 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39571
39572 mutex_lock(&inode->i_mutex);
39573 pipe = inode->i_pipe;
39574 - pipe->readers -= decr;
39575 - pipe->writers -= decw;
39576 + atomic_sub(decr, &pipe->readers);
39577 + atomic_sub(decw, &pipe->writers);
39578
39579 - if (!pipe->readers && !pipe->writers) {
39580 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39581 free_pipe_info(inode);
39582 } else {
39583 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39584 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39585
39586 if (inode->i_pipe) {
39587 ret = 0;
39588 - inode->i_pipe->readers++;
39589 + atomic_inc(&inode->i_pipe->readers);
39590 }
39591
39592 mutex_unlock(&inode->i_mutex);
39593 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39594
39595 if (inode->i_pipe) {
39596 ret = 0;
39597 - inode->i_pipe->writers++;
39598 + atomic_inc(&inode->i_pipe->writers);
39599 }
39600
39601 mutex_unlock(&inode->i_mutex);
39602 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39603 if (inode->i_pipe) {
39604 ret = 0;
39605 if (filp->f_mode & FMODE_READ)
39606 - inode->i_pipe->readers++;
39607 + atomic_inc(&inode->i_pipe->readers);
39608 if (filp->f_mode & FMODE_WRITE)
39609 - inode->i_pipe->writers++;
39610 + atomic_inc(&inode->i_pipe->writers);
39611 }
39612
39613 mutex_unlock(&inode->i_mutex);
39614 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39615 inode->i_pipe = NULL;
39616 }
39617
39618 -static struct vfsmount *pipe_mnt __read_mostly;
39619 +struct vfsmount *pipe_mnt __read_mostly;
39620
39621 /*
39622 * pipefs_dname() is called from d_path().
39623 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39624 goto fail_iput;
39625 inode->i_pipe = pipe;
39626
39627 - pipe->readers = pipe->writers = 1;
39628 + atomic_set(&pipe->readers, 1);
39629 + atomic_set(&pipe->writers, 1);
39630 inode->i_fop = &rdwr_pipefifo_fops;
39631
39632 /*
39633 diff -urNp linux-2.6.39.4/fs/proc/array.c linux-2.6.39.4/fs/proc/array.c
39634 --- linux-2.6.39.4/fs/proc/array.c 2011-05-19 00:06:34.000000000 -0400
39635 +++ linux-2.6.39.4/fs/proc/array.c 2011-08-05 19:44:37.000000000 -0400
39636 @@ -60,6 +60,7 @@
39637 #include <linux/tty.h>
39638 #include <linux/string.h>
39639 #include <linux/mman.h>
39640 +#include <linux/grsecurity.h>
39641 #include <linux/proc_fs.h>
39642 #include <linux/ioport.h>
39643 #include <linux/uaccess.h>
39644 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39645 seq_putc(m, '\n');
39646 }
39647
39648 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39649 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39650 +{
39651 + if (p->mm)
39652 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39653 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39654 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39655 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39656 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39657 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39658 + else
39659 + seq_printf(m, "PaX:\t-----\n");
39660 +}
39661 +#endif
39662 +
39663 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39664 struct pid *pid, struct task_struct *task)
39665 {
39666 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39667 task_cpus_allowed(m, task);
39668 cpuset_task_status_allowed(m, task);
39669 task_context_switch_counts(m, task);
39670 +
39671 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39672 + task_pax(m, task);
39673 +#endif
39674 +
39675 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39676 + task_grsec_rbac(m, task);
39677 +#endif
39678 +
39679 return 0;
39680 }
39681
39682 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39683 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39684 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39685 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39686 +#endif
39687 +
39688 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39689 struct pid *pid, struct task_struct *task, int whole)
39690 {
39691 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39692 cputime_t cutime, cstime, utime, stime;
39693 cputime_t cgtime, gtime;
39694 unsigned long rsslim = 0;
39695 - char tcomm[sizeof(task->comm)];
39696 + char tcomm[sizeof(task->comm)] = { 0 };
39697 unsigned long flags;
39698
39699 + pax_track_stack();
39700 +
39701 state = *get_task_state(task);
39702 vsize = eip = esp = 0;
39703 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39704 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39705 gtime = task->gtime;
39706 }
39707
39708 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39709 + if (PAX_RAND_FLAGS(mm)) {
39710 + eip = 0;
39711 + esp = 0;
39712 + wchan = 0;
39713 + }
39714 +#endif
39715 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39716 + wchan = 0;
39717 + eip =0;
39718 + esp =0;
39719 +#endif
39720 +
39721 /* scale priority and nice values from timeslices to -20..20 */
39722 /* to make it look like a "normal" Unix priority/nice value */
39723 priority = task_prio(task);
39724 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39725 vsize,
39726 mm ? get_mm_rss(mm) : 0,
39727 rsslim,
39728 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39729 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39730 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39731 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39732 +#else
39733 mm ? (permitted ? mm->start_code : 1) : 0,
39734 mm ? (permitted ? mm->end_code : 1) : 0,
39735 (permitted && mm) ? mm->start_stack : 0,
39736 +#endif
39737 esp,
39738 eip,
39739 /* The signal information here is obsolete.
39740 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39741
39742 return 0;
39743 }
39744 +
39745 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39746 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39747 +{
39748 + u32 curr_ip = 0;
39749 + unsigned long flags;
39750 +
39751 + if (lock_task_sighand(task, &flags)) {
39752 + curr_ip = task->signal->curr_ip;
39753 + unlock_task_sighand(task, &flags);
39754 + }
39755 +
39756 + return sprintf(buffer, "%pI4\n", &curr_ip);
39757 +}
39758 +#endif
39759 diff -urNp linux-2.6.39.4/fs/proc/base.c linux-2.6.39.4/fs/proc/base.c
39760 --- linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:11:51.000000000 -0400
39761 +++ linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:13:18.000000000 -0400
39762 @@ -104,6 +104,22 @@ struct pid_entry {
39763 union proc_op op;
39764 };
39765
39766 +struct getdents_callback {
39767 + struct linux_dirent __user * current_dir;
39768 + struct linux_dirent __user * previous;
39769 + struct file * file;
39770 + int count;
39771 + int error;
39772 +};
39773 +
39774 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39775 + loff_t offset, u64 ino, unsigned int d_type)
39776 +{
39777 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39778 + buf->error = -EINVAL;
39779 + return 0;
39780 +}
39781 +
39782 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39783 .name = (NAME), \
39784 .len = sizeof(NAME) - 1, \
39785 @@ -206,6 +222,9 @@ static struct mm_struct *__check_mem_per
39786 if (task == current)
39787 return mm;
39788
39789 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39790 + return ERR_PTR(-EPERM);
39791 +
39792 /*
39793 * If current is actively ptrace'ing, and would also be
39794 * permitted to freshly attach with ptrace now, permit it.
39795 @@ -279,6 +298,9 @@ static int proc_pid_cmdline(struct task_
39796 if (!mm->arg_end)
39797 goto out_mm; /* Shh! No looking before we're done */
39798
39799 + if (gr_acl_handle_procpidmem(task))
39800 + goto out_mm;
39801 +
39802 len = mm->arg_end - mm->arg_start;
39803
39804 if (len > PAGE_SIZE)
39805 @@ -306,12 +328,28 @@ out:
39806 return res;
39807 }
39808
39809 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39810 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39811 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39812 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39813 +#endif
39814 +
39815 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39816 {
39817 struct mm_struct *mm = mm_for_maps(task);
39818 int res = PTR_ERR(mm);
39819 if (mm && !IS_ERR(mm)) {
39820 unsigned int nwords = 0;
39821 +
39822 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39823 + /* allow if we're currently ptracing this task */
39824 + if (PAX_RAND_FLAGS(mm) &&
39825 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39826 + mmput(mm);
39827 + return res;
39828 + }
39829 +#endif
39830 +
39831 do {
39832 nwords += 2;
39833 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39834 @@ -325,7 +363,7 @@ static int proc_pid_auxv(struct task_str
39835 }
39836
39837
39838 -#ifdef CONFIG_KALLSYMS
39839 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39840 /*
39841 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39842 * Returns the resolved symbol. If that fails, simply return the address.
39843 @@ -364,7 +402,7 @@ static void unlock_trace(struct task_str
39844 mutex_unlock(&task->signal->cred_guard_mutex);
39845 }
39846
39847 -#ifdef CONFIG_STACKTRACE
39848 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39849
39850 #define MAX_STACK_TRACE_DEPTH 64
39851
39852 @@ -555,7 +593,7 @@ static int proc_pid_limits(struct task_s
39853 return count;
39854 }
39855
39856 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39857 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39858 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39859 {
39860 long nr;
39861 @@ -584,7 +622,7 @@ static int proc_pid_syscall(struct task_
39862 /************************************************************************/
39863
39864 /* permission checks */
39865 -static int proc_fd_access_allowed(struct inode *inode)
39866 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39867 {
39868 struct task_struct *task;
39869 int allowed = 0;
39870 @@ -594,7 +632,10 @@ static int proc_fd_access_allowed(struct
39871 */
39872 task = get_proc_task(inode);
39873 if (task) {
39874 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39875 + if (log)
39876 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39877 + else
39878 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39879 put_task_struct(task);
39880 }
39881 return allowed;
39882 @@ -973,6 +1014,9 @@ static ssize_t environ_read(struct file
39883 if (!task)
39884 goto out_no_task;
39885
39886 + if (gr_acl_handle_procpidmem(task))
39887 + goto out;
39888 +
39889 ret = -ENOMEM;
39890 page = (char *)__get_free_page(GFP_TEMPORARY);
39891 if (!page)
39892 @@ -1660,7 +1704,7 @@ static void *proc_pid_follow_link(struct
39893 path_put(&nd->path);
39894
39895 /* Are we allowed to snoop on the tasks file descriptors? */
39896 - if (!proc_fd_access_allowed(inode))
39897 + if (!proc_fd_access_allowed(inode,0))
39898 goto out;
39899
39900 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39901 @@ -1699,8 +1743,18 @@ static int proc_pid_readlink(struct dent
39902 struct path path;
39903
39904 /* Are we allowed to snoop on the tasks file descriptors? */
39905 - if (!proc_fd_access_allowed(inode))
39906 - goto out;
39907 + /* logging this is needed for learning on chromium to work properly,
39908 + but we don't want to flood the logs from 'ps' which does a readlink
39909 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39910 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39911 + */
39912 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39913 + if (!proc_fd_access_allowed(inode,0))
39914 + goto out;
39915 + } else {
39916 + if (!proc_fd_access_allowed(inode,1))
39917 + goto out;
39918 + }
39919
39920 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39921 if (error)
39922 @@ -1766,7 +1820,11 @@ static struct inode *proc_pid_make_inode
39923 rcu_read_lock();
39924 cred = __task_cred(task);
39925 inode->i_uid = cred->euid;
39926 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39927 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39928 +#else
39929 inode->i_gid = cred->egid;
39930 +#endif
39931 rcu_read_unlock();
39932 }
39933 security_task_to_inode(task, inode);
39934 @@ -1784,6 +1842,9 @@ static int pid_getattr(struct vfsmount *
39935 struct inode *inode = dentry->d_inode;
39936 struct task_struct *task;
39937 const struct cred *cred;
39938 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39939 + const struct cred *tmpcred = current_cred();
39940 +#endif
39941
39942 generic_fillattr(inode, stat);
39943
39944 @@ -1791,13 +1852,41 @@ static int pid_getattr(struct vfsmount *
39945 stat->uid = 0;
39946 stat->gid = 0;
39947 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39948 +
39949 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39950 + rcu_read_unlock();
39951 + return -ENOENT;
39952 + }
39953 +
39954 if (task) {
39955 + cred = __task_cred(task);
39956 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39957 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39958 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39959 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39960 +#endif
39961 + ) {
39962 +#endif
39963 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39964 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39965 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39966 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39967 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39968 +#endif
39969 task_dumpable(task)) {
39970 - cred = __task_cred(task);
39971 stat->uid = cred->euid;
39972 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39973 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39974 +#else
39975 stat->gid = cred->egid;
39976 +#endif
39977 }
39978 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39979 + } else {
39980 + rcu_read_unlock();
39981 + return -ENOENT;
39982 + }
39983 +#endif
39984 }
39985 rcu_read_unlock();
39986 return 0;
39987 @@ -1834,11 +1923,20 @@ static int pid_revalidate(struct dentry
39988
39989 if (task) {
39990 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39991 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39992 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39993 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39994 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39995 +#endif
39996 task_dumpable(task)) {
39997 rcu_read_lock();
39998 cred = __task_cred(task);
39999 inode->i_uid = cred->euid;
40000 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40001 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40002 +#else
40003 inode->i_gid = cred->egid;
40004 +#endif
40005 rcu_read_unlock();
40006 } else {
40007 inode->i_uid = 0;
40008 @@ -1959,7 +2057,8 @@ static int proc_fd_info(struct inode *in
40009 int fd = proc_fd(inode);
40010
40011 if (task) {
40012 - files = get_files_struct(task);
40013 + if (!gr_acl_handle_procpidmem(task))
40014 + files = get_files_struct(task);
40015 put_task_struct(task);
40016 }
40017 if (files) {
40018 @@ -2219,15 +2318,25 @@ static const struct file_operations proc
40019 */
40020 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
40021 {
40022 + struct task_struct *task;
40023 int rv;
40024
40025 if (flags & IPERM_FLAG_RCU)
40026 return -ECHILD;
40027 rv = generic_permission(inode, mask, flags, NULL);
40028 - if (rv == 0)
40029 - return 0;
40030 +
40031 if (task_pid(current) == proc_pid(inode))
40032 rv = 0;
40033 +
40034 + task = get_proc_task(inode);
40035 + if (task == NULL)
40036 + return rv;
40037 +
40038 + if (gr_acl_handle_procpidmem(task))
40039 + rv = -EACCES;
40040 +
40041 + put_task_struct(task);
40042 +
40043 return rv;
40044 }
40045
40046 @@ -2337,6 +2446,9 @@ static struct dentry *proc_pident_lookup
40047 if (!task)
40048 goto out_no_task;
40049
40050 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40051 + goto out;
40052 +
40053 /*
40054 * Yes, it does not scale. And it should not. Don't add
40055 * new entries into /proc/<tgid>/ without very good reasons.
40056 @@ -2381,6 +2493,9 @@ static int proc_pident_readdir(struct fi
40057 if (!task)
40058 goto out_no_task;
40059
40060 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40061 + goto out;
40062 +
40063 ret = 0;
40064 i = filp->f_pos;
40065 switch (i) {
40066 @@ -2651,7 +2766,7 @@ static void *proc_self_follow_link(struc
40067 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
40068 void *cookie)
40069 {
40070 - char *s = nd_get_link(nd);
40071 + const char *s = nd_get_link(nd);
40072 if (!IS_ERR(s))
40073 __putname(s);
40074 }
40075 @@ -2838,7 +2953,7 @@ static const struct pid_entry tgid_base_
40076 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
40077 #endif
40078 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40079 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40080 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40081 INF("syscall", S_IRUGO, proc_pid_syscall),
40082 #endif
40083 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40084 @@ -2863,10 +2978,10 @@ static const struct pid_entry tgid_base_
40085 #ifdef CONFIG_SECURITY
40086 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40087 #endif
40088 -#ifdef CONFIG_KALLSYMS
40089 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40090 INF("wchan", S_IRUGO, proc_pid_wchan),
40091 #endif
40092 -#ifdef CONFIG_STACKTRACE
40093 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40094 ONE("stack", S_IRUGO, proc_pid_stack),
40095 #endif
40096 #ifdef CONFIG_SCHEDSTATS
40097 @@ -2897,6 +3012,9 @@ static const struct pid_entry tgid_base_
40098 #ifdef CONFIG_TASK_IO_ACCOUNTING
40099 INF("io", S_IRUSR, proc_tgid_io_accounting),
40100 #endif
40101 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40102 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
40103 +#endif
40104 };
40105
40106 static int proc_tgid_base_readdir(struct file * filp,
40107 @@ -3022,7 +3140,14 @@ static struct dentry *proc_pid_instantia
40108 if (!inode)
40109 goto out;
40110
40111 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40112 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
40113 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40114 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40115 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
40116 +#else
40117 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
40118 +#endif
40119 inode->i_op = &proc_tgid_base_inode_operations;
40120 inode->i_fop = &proc_tgid_base_operations;
40121 inode->i_flags|=S_IMMUTABLE;
40122 @@ -3064,7 +3189,11 @@ struct dentry *proc_pid_lookup(struct in
40123 if (!task)
40124 goto out;
40125
40126 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40127 + goto out_put_task;
40128 +
40129 result = proc_pid_instantiate(dir, dentry, task, NULL);
40130 +out_put_task:
40131 put_task_struct(task);
40132 out:
40133 return result;
40134 @@ -3129,6 +3258,11 @@ int proc_pid_readdir(struct file * filp,
40135 {
40136 unsigned int nr;
40137 struct task_struct *reaper;
40138 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40139 + const struct cred *tmpcred = current_cred();
40140 + const struct cred *itercred;
40141 +#endif
40142 + filldir_t __filldir = filldir;
40143 struct tgid_iter iter;
40144 struct pid_namespace *ns;
40145
40146 @@ -3152,8 +3286,27 @@ int proc_pid_readdir(struct file * filp,
40147 for (iter = next_tgid(ns, iter);
40148 iter.task;
40149 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40150 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40151 + rcu_read_lock();
40152 + itercred = __task_cred(iter.task);
40153 +#endif
40154 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40155 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40156 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40157 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40158 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40159 +#endif
40160 + )
40161 +#endif
40162 + )
40163 + __filldir = &gr_fake_filldir;
40164 + else
40165 + __filldir = filldir;
40166 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40167 + rcu_read_unlock();
40168 +#endif
40169 filp->f_pos = iter.tgid + TGID_OFFSET;
40170 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40171 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40172 put_task_struct(iter.task);
40173 goto out;
40174 }
40175 @@ -3180,7 +3333,7 @@ static const struct pid_entry tid_base_s
40176 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40177 #endif
40178 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40179 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40180 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40181 INF("syscall", S_IRUGO, proc_pid_syscall),
40182 #endif
40183 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40184 @@ -3204,10 +3357,10 @@ static const struct pid_entry tid_base_s
40185 #ifdef CONFIG_SECURITY
40186 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40187 #endif
40188 -#ifdef CONFIG_KALLSYMS
40189 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40190 INF("wchan", S_IRUGO, proc_pid_wchan),
40191 #endif
40192 -#ifdef CONFIG_STACKTRACE
40193 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40194 ONE("stack", S_IRUGO, proc_pid_stack),
40195 #endif
40196 #ifdef CONFIG_SCHEDSTATS
40197 diff -urNp linux-2.6.39.4/fs/proc/cmdline.c linux-2.6.39.4/fs/proc/cmdline.c
40198 --- linux-2.6.39.4/fs/proc/cmdline.c 2011-05-19 00:06:34.000000000 -0400
40199 +++ linux-2.6.39.4/fs/proc/cmdline.c 2011-08-05 19:44:37.000000000 -0400
40200 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
40201
40202 static int __init proc_cmdline_init(void)
40203 {
40204 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40205 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40206 +#else
40207 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40208 +#endif
40209 return 0;
40210 }
40211 module_init(proc_cmdline_init);
40212 diff -urNp linux-2.6.39.4/fs/proc/devices.c linux-2.6.39.4/fs/proc/devices.c
40213 --- linux-2.6.39.4/fs/proc/devices.c 2011-05-19 00:06:34.000000000 -0400
40214 +++ linux-2.6.39.4/fs/proc/devices.c 2011-08-05 19:44:37.000000000 -0400
40215 @@ -64,7 +64,11 @@ static const struct file_operations proc
40216
40217 static int __init proc_devices_init(void)
40218 {
40219 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40220 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40221 +#else
40222 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40223 +#endif
40224 return 0;
40225 }
40226 module_init(proc_devices_init);
40227 diff -urNp linux-2.6.39.4/fs/proc/inode.c linux-2.6.39.4/fs/proc/inode.c
40228 --- linux-2.6.39.4/fs/proc/inode.c 2011-05-19 00:06:34.000000000 -0400
40229 +++ linux-2.6.39.4/fs/proc/inode.c 2011-08-05 19:44:37.000000000 -0400
40230 @@ -433,7 +433,11 @@ struct inode *proc_get_inode(struct supe
40231 if (de->mode) {
40232 inode->i_mode = de->mode;
40233 inode->i_uid = de->uid;
40234 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40235 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40236 +#else
40237 inode->i_gid = de->gid;
40238 +#endif
40239 }
40240 if (de->size)
40241 inode->i_size = de->size;
40242 diff -urNp linux-2.6.39.4/fs/proc/internal.h linux-2.6.39.4/fs/proc/internal.h
40243 --- linux-2.6.39.4/fs/proc/internal.h 2011-05-19 00:06:34.000000000 -0400
40244 +++ linux-2.6.39.4/fs/proc/internal.h 2011-08-05 19:44:37.000000000 -0400
40245 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40246 struct pid *pid, struct task_struct *task);
40247 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40248 struct pid *pid, struct task_struct *task);
40249 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40250 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40251 +#endif
40252 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40253
40254 extern const struct file_operations proc_maps_operations;
40255 diff -urNp linux-2.6.39.4/fs/proc/Kconfig linux-2.6.39.4/fs/proc/Kconfig
40256 --- linux-2.6.39.4/fs/proc/Kconfig 2011-05-19 00:06:34.000000000 -0400
40257 +++ linux-2.6.39.4/fs/proc/Kconfig 2011-08-05 19:44:37.000000000 -0400
40258 @@ -30,12 +30,12 @@ config PROC_FS
40259
40260 config PROC_KCORE
40261 bool "/proc/kcore support" if !ARM
40262 - depends on PROC_FS && MMU
40263 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40264
40265 config PROC_VMCORE
40266 bool "/proc/vmcore support"
40267 - depends on PROC_FS && CRASH_DUMP
40268 - default y
40269 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40270 + default n
40271 help
40272 Exports the dump image of crashed kernel in ELF format.
40273
40274 @@ -59,8 +59,8 @@ config PROC_SYSCTL
40275 limited in memory.
40276
40277 config PROC_PAGE_MONITOR
40278 - default y
40279 - depends on PROC_FS && MMU
40280 + default n
40281 + depends on PROC_FS && MMU && !GRKERNSEC
40282 bool "Enable /proc page monitoring" if EXPERT
40283 help
40284 Various /proc files exist to monitor process memory utilization:
40285 diff -urNp linux-2.6.39.4/fs/proc/kcore.c linux-2.6.39.4/fs/proc/kcore.c
40286 --- linux-2.6.39.4/fs/proc/kcore.c 2011-05-19 00:06:34.000000000 -0400
40287 +++ linux-2.6.39.4/fs/proc/kcore.c 2011-08-05 19:44:37.000000000 -0400
40288 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40289 off_t offset = 0;
40290 struct kcore_list *m;
40291
40292 + pax_track_stack();
40293 +
40294 /* setup ELF header */
40295 elf = (struct elfhdr *) bufp;
40296 bufp += sizeof(struct elfhdr);
40297 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40298 * the addresses in the elf_phdr on our list.
40299 */
40300 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40301 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40302 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40303 + if (tsz > buflen)
40304 tsz = buflen;
40305 -
40306 +
40307 while (buflen) {
40308 struct kcore_list *m;
40309
40310 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40311 kfree(elf_buf);
40312 } else {
40313 if (kern_addr_valid(start)) {
40314 - unsigned long n;
40315 + char *elf_buf;
40316 + mm_segment_t oldfs;
40317
40318 - n = copy_to_user(buffer, (char *)start, tsz);
40319 - /*
40320 - * We cannot distingush between fault on source
40321 - * and fault on destination. When this happens
40322 - * we clear too and hope it will trigger the
40323 - * EFAULT again.
40324 - */
40325 - if (n) {
40326 - if (clear_user(buffer + tsz - n,
40327 - n))
40328 + elf_buf = kmalloc(tsz, GFP_KERNEL);
40329 + if (!elf_buf)
40330 + return -ENOMEM;
40331 + oldfs = get_fs();
40332 + set_fs(KERNEL_DS);
40333 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40334 + set_fs(oldfs);
40335 + if (copy_to_user(buffer, elf_buf, tsz)) {
40336 + kfree(elf_buf);
40337 return -EFAULT;
40338 + }
40339 }
40340 + set_fs(oldfs);
40341 + kfree(elf_buf);
40342 } else {
40343 if (clear_user(buffer, tsz))
40344 return -EFAULT;
40345 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40346
40347 static int open_kcore(struct inode *inode, struct file *filp)
40348 {
40349 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40350 + return -EPERM;
40351 +#endif
40352 if (!capable(CAP_SYS_RAWIO))
40353 return -EPERM;
40354 if (kcore_need_update)
40355 diff -urNp linux-2.6.39.4/fs/proc/meminfo.c linux-2.6.39.4/fs/proc/meminfo.c
40356 --- linux-2.6.39.4/fs/proc/meminfo.c 2011-05-19 00:06:34.000000000 -0400
40357 +++ linux-2.6.39.4/fs/proc/meminfo.c 2011-08-05 19:44:37.000000000 -0400
40358 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40359 unsigned long pages[NR_LRU_LISTS];
40360 int lru;
40361
40362 + pax_track_stack();
40363 +
40364 /*
40365 * display in kilobytes.
40366 */
40367 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40368 vmi.used >> 10,
40369 vmi.largest_chunk >> 10
40370 #ifdef CONFIG_MEMORY_FAILURE
40371 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40372 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40373 #endif
40374 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40375 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40376 diff -urNp linux-2.6.39.4/fs/proc/nommu.c linux-2.6.39.4/fs/proc/nommu.c
40377 --- linux-2.6.39.4/fs/proc/nommu.c 2011-05-19 00:06:34.000000000 -0400
40378 +++ linux-2.6.39.4/fs/proc/nommu.c 2011-08-05 19:44:37.000000000 -0400
40379 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40380 if (len < 1)
40381 len = 1;
40382 seq_printf(m, "%*c", len, ' ');
40383 - seq_path(m, &file->f_path, "");
40384 + seq_path(m, &file->f_path, "\n\\");
40385 }
40386
40387 seq_putc(m, '\n');
40388 diff -urNp linux-2.6.39.4/fs/proc/proc_net.c linux-2.6.39.4/fs/proc/proc_net.c
40389 --- linux-2.6.39.4/fs/proc/proc_net.c 2011-05-19 00:06:34.000000000 -0400
40390 +++ linux-2.6.39.4/fs/proc/proc_net.c 2011-08-05 19:44:37.000000000 -0400
40391 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40392 struct task_struct *task;
40393 struct nsproxy *ns;
40394 struct net *net = NULL;
40395 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40396 + const struct cred *cred = current_cred();
40397 +#endif
40398 +
40399 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40400 + if (cred->fsuid)
40401 + return net;
40402 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40403 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40404 + return net;
40405 +#endif
40406
40407 rcu_read_lock();
40408 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40409 diff -urNp linux-2.6.39.4/fs/proc/proc_sysctl.c linux-2.6.39.4/fs/proc/proc_sysctl.c
40410 --- linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-05-19 00:06:34.000000000 -0400
40411 +++ linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-08-05 19:44:37.000000000 -0400
40412 @@ -8,6 +8,8 @@
40413 #include <linux/namei.h>
40414 #include "internal.h"
40415
40416 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40417 +
40418 static const struct dentry_operations proc_sys_dentry_operations;
40419 static const struct file_operations proc_sys_file_operations;
40420 static const struct inode_operations proc_sys_inode_operations;
40421 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40422 if (!p)
40423 goto out;
40424
40425 + if (gr_handle_sysctl(p, MAY_EXEC))
40426 + goto out;
40427 +
40428 err = ERR_PTR(-ENOMEM);
40429 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40430 if (h)
40431 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40432 if (*pos < file->f_pos)
40433 continue;
40434
40435 + if (gr_handle_sysctl(table, 0))
40436 + continue;
40437 +
40438 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40439 if (res)
40440 return res;
40441 @@ -358,6 +366,9 @@ static int proc_sys_getattr(struct vfsmo
40442 if (IS_ERR(head))
40443 return PTR_ERR(head);
40444
40445 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40446 + return -ENOENT;
40447 +
40448 generic_fillattr(inode, stat);
40449 if (table)
40450 stat->mode = (stat->mode & S_IFMT) | table->mode;
40451 diff -urNp linux-2.6.39.4/fs/proc/root.c linux-2.6.39.4/fs/proc/root.c
40452 --- linux-2.6.39.4/fs/proc/root.c 2011-05-19 00:06:34.000000000 -0400
40453 +++ linux-2.6.39.4/fs/proc/root.c 2011-08-05 19:44:37.000000000 -0400
40454 @@ -122,7 +122,15 @@ void __init proc_root_init(void)
40455 #ifdef CONFIG_PROC_DEVICETREE
40456 proc_device_tree_init();
40457 #endif
40458 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40459 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40460 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40461 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40462 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40463 +#endif
40464 +#else
40465 proc_mkdir("bus", NULL);
40466 +#endif
40467 proc_sys_init();
40468 }
40469
40470 diff -urNp linux-2.6.39.4/fs/proc/task_mmu.c linux-2.6.39.4/fs/proc/task_mmu.c
40471 --- linux-2.6.39.4/fs/proc/task_mmu.c 2011-05-19 00:06:34.000000000 -0400
40472 +++ linux-2.6.39.4/fs/proc/task_mmu.c 2011-08-05 19:44:37.000000000 -0400
40473 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40474 "VmExe:\t%8lu kB\n"
40475 "VmLib:\t%8lu kB\n"
40476 "VmPTE:\t%8lu kB\n"
40477 - "VmSwap:\t%8lu kB\n",
40478 - hiwater_vm << (PAGE_SHIFT-10),
40479 + "VmSwap:\t%8lu kB\n"
40480 +
40481 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40482 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40483 +#endif
40484 +
40485 + ,hiwater_vm << (PAGE_SHIFT-10),
40486 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40487 mm->locked_vm << (PAGE_SHIFT-10),
40488 hiwater_rss << (PAGE_SHIFT-10),
40489 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40490 data << (PAGE_SHIFT-10),
40491 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40492 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40493 - swap << (PAGE_SHIFT-10));
40494 + swap << (PAGE_SHIFT-10)
40495 +
40496 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40497 + , mm->context.user_cs_base, mm->context.user_cs_limit
40498 +#endif
40499 +
40500 + );
40501 }
40502
40503 unsigned long task_vsize(struct mm_struct *mm)
40504 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40505 return ret;
40506 }
40507
40508 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40509 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40510 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40511 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40512 +#endif
40513 +
40514 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40515 {
40516 struct mm_struct *mm = vma->vm_mm;
40517 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40518 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40519 }
40520
40521 - /* We don't show the stack guard page in /proc/maps */
40522 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40523 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40524 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40525 +#else
40526 start = vma->vm_start;
40527 - if (stack_guard_page_start(vma, start))
40528 - start += PAGE_SIZE;
40529 end = vma->vm_end;
40530 - if (stack_guard_page_end(vma, end))
40531 - end -= PAGE_SIZE;
40532 +#endif
40533
40534 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40535 start,
40536 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40537 flags & VM_WRITE ? 'w' : '-',
40538 flags & VM_EXEC ? 'x' : '-',
40539 flags & VM_MAYSHARE ? 's' : 'p',
40540 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40541 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40542 +#else
40543 pgoff,
40544 +#endif
40545 MAJOR(dev), MINOR(dev), ino, &len);
40546
40547 /*
40548 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40549 */
40550 if (file) {
40551 pad_len_spaces(m, len);
40552 - seq_path(m, &file->f_path, "\n");
40553 + seq_path(m, &file->f_path, "\n\\");
40554 } else {
40555 const char *name = arch_vma_name(vma);
40556 if (!name) {
40557 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40558 if (vma->vm_start <= mm->brk &&
40559 vma->vm_end >= mm->start_brk) {
40560 name = "[heap]";
40561 - } else if (vma->vm_start <= mm->start_stack &&
40562 - vma->vm_end >= mm->start_stack) {
40563 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40564 + (vma->vm_start <= mm->start_stack &&
40565 + vma->vm_end >= mm->start_stack)) {
40566 name = "[stack]";
40567 }
40568 } else {
40569 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40570 };
40571
40572 memset(&mss, 0, sizeof mss);
40573 - mss.vma = vma;
40574 - /* mmap_sem is held in m_start */
40575 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40576 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40577 -
40578 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40579 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40580 +#endif
40581 + mss.vma = vma;
40582 + /* mmap_sem is held in m_start */
40583 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40584 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40585 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40586 + }
40587 +#endif
40588 show_map_vma(m, vma);
40589
40590 seq_printf(m,
40591 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40592 "KernelPageSize: %8lu kB\n"
40593 "MMUPageSize: %8lu kB\n"
40594 "Locked: %8lu kB\n",
40595 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40596 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40597 +#else
40598 (vma->vm_end - vma->vm_start) >> 10,
40599 +#endif
40600 mss.resident >> 10,
40601 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40602 mss.shared_clean >> 10,
40603 diff -urNp linux-2.6.39.4/fs/proc/task_nommu.c linux-2.6.39.4/fs/proc/task_nommu.c
40604 --- linux-2.6.39.4/fs/proc/task_nommu.c 2011-05-19 00:06:34.000000000 -0400
40605 +++ linux-2.6.39.4/fs/proc/task_nommu.c 2011-08-05 19:44:37.000000000 -0400
40606 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40607 else
40608 bytes += kobjsize(mm);
40609
40610 - if (current->fs && current->fs->users > 1)
40611 + if (current->fs && atomic_read(&current->fs->users) > 1)
40612 sbytes += kobjsize(current->fs);
40613 else
40614 bytes += kobjsize(current->fs);
40615 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40616
40617 if (file) {
40618 pad_len_spaces(m, len);
40619 - seq_path(m, &file->f_path, "");
40620 + seq_path(m, &file->f_path, "\n\\");
40621 } else if (mm) {
40622 if (vma->vm_start <= mm->start_stack &&
40623 vma->vm_end >= mm->start_stack) {
40624 diff -urNp linux-2.6.39.4/fs/quota/netlink.c linux-2.6.39.4/fs/quota/netlink.c
40625 --- linux-2.6.39.4/fs/quota/netlink.c 2011-05-19 00:06:34.000000000 -0400
40626 +++ linux-2.6.39.4/fs/quota/netlink.c 2011-08-05 19:44:37.000000000 -0400
40627 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40628 void quota_send_warning(short type, unsigned int id, dev_t dev,
40629 const char warntype)
40630 {
40631 - static atomic_t seq;
40632 + static atomic_unchecked_t seq;
40633 struct sk_buff *skb;
40634 void *msg_head;
40635 int ret;
40636 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40637 "VFS: Not enough memory to send quota warning.\n");
40638 return;
40639 }
40640 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40641 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40642 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40643 if (!msg_head) {
40644 printk(KERN_ERR
40645 diff -urNp linux-2.6.39.4/fs/readdir.c linux-2.6.39.4/fs/readdir.c
40646 --- linux-2.6.39.4/fs/readdir.c 2011-05-19 00:06:34.000000000 -0400
40647 +++ linux-2.6.39.4/fs/readdir.c 2011-08-05 19:44:37.000000000 -0400
40648 @@ -17,6 +17,7 @@
40649 #include <linux/security.h>
40650 #include <linux/syscalls.h>
40651 #include <linux/unistd.h>
40652 +#include <linux/namei.h>
40653
40654 #include <asm/uaccess.h>
40655
40656 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40657
40658 struct readdir_callback {
40659 struct old_linux_dirent __user * dirent;
40660 + struct file * file;
40661 int result;
40662 };
40663
40664 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40665 buf->result = -EOVERFLOW;
40666 return -EOVERFLOW;
40667 }
40668 +
40669 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40670 + return 0;
40671 +
40672 buf->result++;
40673 dirent = buf->dirent;
40674 if (!access_ok(VERIFY_WRITE, dirent,
40675 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40676
40677 buf.result = 0;
40678 buf.dirent = dirent;
40679 + buf.file = file;
40680
40681 error = vfs_readdir(file, fillonedir, &buf);
40682 if (buf.result)
40683 @@ -142,6 +149,7 @@ struct linux_dirent {
40684 struct getdents_callback {
40685 struct linux_dirent __user * current_dir;
40686 struct linux_dirent __user * previous;
40687 + struct file * file;
40688 int count;
40689 int error;
40690 };
40691 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40692 buf->error = -EOVERFLOW;
40693 return -EOVERFLOW;
40694 }
40695 +
40696 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40697 + return 0;
40698 +
40699 dirent = buf->previous;
40700 if (dirent) {
40701 if (__put_user(offset, &dirent->d_off))
40702 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40703 buf.previous = NULL;
40704 buf.count = count;
40705 buf.error = 0;
40706 + buf.file = file;
40707
40708 error = vfs_readdir(file, filldir, &buf);
40709 if (error >= 0)
40710 @@ -229,6 +242,7 @@ out:
40711 struct getdents_callback64 {
40712 struct linux_dirent64 __user * current_dir;
40713 struct linux_dirent64 __user * previous;
40714 + struct file *file;
40715 int count;
40716 int error;
40717 };
40718 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40719 buf->error = -EINVAL; /* only used if we fail.. */
40720 if (reclen > buf->count)
40721 return -EINVAL;
40722 +
40723 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40724 + return 0;
40725 +
40726 dirent = buf->previous;
40727 if (dirent) {
40728 if (__put_user(offset, &dirent->d_off))
40729 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40730
40731 buf.current_dir = dirent;
40732 buf.previous = NULL;
40733 + buf.file = file;
40734 buf.count = count;
40735 buf.error = 0;
40736
40737 diff -urNp linux-2.6.39.4/fs/reiserfs/dir.c linux-2.6.39.4/fs/reiserfs/dir.c
40738 --- linux-2.6.39.4/fs/reiserfs/dir.c 2011-05-19 00:06:34.000000000 -0400
40739 +++ linux-2.6.39.4/fs/reiserfs/dir.c 2011-08-05 19:44:37.000000000 -0400
40740 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40741 struct reiserfs_dir_entry de;
40742 int ret = 0;
40743
40744 + pax_track_stack();
40745 +
40746 reiserfs_write_lock(inode->i_sb);
40747
40748 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40749 diff -urNp linux-2.6.39.4/fs/reiserfs/do_balan.c linux-2.6.39.4/fs/reiserfs/do_balan.c
40750 --- linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-05-19 00:06:34.000000000 -0400
40751 +++ linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-08-05 19:44:37.000000000 -0400
40752 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40753 return;
40754 }
40755
40756 - atomic_inc(&(fs_generation(tb->tb_sb)));
40757 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40758 do_balance_starts(tb);
40759
40760 /* balance leaf returns 0 except if combining L R and S into
40761 diff -urNp linux-2.6.39.4/fs/reiserfs/journal.c linux-2.6.39.4/fs/reiserfs/journal.c
40762 --- linux-2.6.39.4/fs/reiserfs/journal.c 2011-05-19 00:06:34.000000000 -0400
40763 +++ linux-2.6.39.4/fs/reiserfs/journal.c 2011-08-05 19:44:37.000000000 -0400
40764 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40765 struct buffer_head *bh;
40766 int i, j;
40767
40768 + pax_track_stack();
40769 +
40770 bh = __getblk(dev, block, bufsize);
40771 if (buffer_uptodate(bh))
40772 return (bh);
40773 diff -urNp linux-2.6.39.4/fs/reiserfs/namei.c linux-2.6.39.4/fs/reiserfs/namei.c
40774 --- linux-2.6.39.4/fs/reiserfs/namei.c 2011-05-19 00:06:34.000000000 -0400
40775 +++ linux-2.6.39.4/fs/reiserfs/namei.c 2011-08-05 19:44:37.000000000 -0400
40776 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40777 unsigned long savelink = 1;
40778 struct timespec ctime;
40779
40780 + pax_track_stack();
40781 +
40782 /* three balancings: (1) old name removal, (2) new name insertion
40783 and (3) maybe "save" link insertion
40784 stat data updates: (1) old directory,
40785 diff -urNp linux-2.6.39.4/fs/reiserfs/procfs.c linux-2.6.39.4/fs/reiserfs/procfs.c
40786 --- linux-2.6.39.4/fs/reiserfs/procfs.c 2011-05-19 00:06:34.000000000 -0400
40787 +++ linux-2.6.39.4/fs/reiserfs/procfs.c 2011-08-05 19:44:37.000000000 -0400
40788 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40789 "SMALL_TAILS " : "NO_TAILS ",
40790 replay_only(sb) ? "REPLAY_ONLY " : "",
40791 convert_reiserfs(sb) ? "CONV " : "",
40792 - atomic_read(&r->s_generation_counter),
40793 + atomic_read_unchecked(&r->s_generation_counter),
40794 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40795 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40796 SF(s_good_search_by_key_reada), SF(s_bmaps),
40797 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40798 struct journal_params *jp = &rs->s_v1.s_journal;
40799 char b[BDEVNAME_SIZE];
40800
40801 + pax_track_stack();
40802 +
40803 seq_printf(m, /* on-disk fields */
40804 "jp_journal_1st_block: \t%i\n"
40805 "jp_journal_dev: \t%s[%x]\n"
40806 diff -urNp linux-2.6.39.4/fs/reiserfs/stree.c linux-2.6.39.4/fs/reiserfs/stree.c
40807 --- linux-2.6.39.4/fs/reiserfs/stree.c 2011-05-19 00:06:34.000000000 -0400
40808 +++ linux-2.6.39.4/fs/reiserfs/stree.c 2011-08-05 19:44:37.000000000 -0400
40809 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40810 int iter = 0;
40811 #endif
40812
40813 + pax_track_stack();
40814 +
40815 BUG_ON(!th->t_trans_id);
40816
40817 init_tb_struct(th, &s_del_balance, sb, path,
40818 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40819 int retval;
40820 int quota_cut_bytes = 0;
40821
40822 + pax_track_stack();
40823 +
40824 BUG_ON(!th->t_trans_id);
40825
40826 le_key2cpu_key(&cpu_key, key);
40827 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40828 int quota_cut_bytes;
40829 loff_t tail_pos = 0;
40830
40831 + pax_track_stack();
40832 +
40833 BUG_ON(!th->t_trans_id);
40834
40835 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40836 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40837 int retval;
40838 int fs_gen;
40839
40840 + pax_track_stack();
40841 +
40842 BUG_ON(!th->t_trans_id);
40843
40844 fs_gen = get_generation(inode->i_sb);
40845 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40846 int fs_gen = 0;
40847 int quota_bytes = 0;
40848
40849 + pax_track_stack();
40850 +
40851 BUG_ON(!th->t_trans_id);
40852
40853 if (inode) { /* Do we count quotas for item? */
40854 diff -urNp linux-2.6.39.4/fs/reiserfs/super.c linux-2.6.39.4/fs/reiserfs/super.c
40855 --- linux-2.6.39.4/fs/reiserfs/super.c 2011-05-19 00:06:34.000000000 -0400
40856 +++ linux-2.6.39.4/fs/reiserfs/super.c 2011-08-05 19:44:37.000000000 -0400
40857 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40858 {.option_name = NULL}
40859 };
40860
40861 + pax_track_stack();
40862 +
40863 *blocks = 0;
40864 if (!options || !*options)
40865 /* use default configuration: create tails, journaling on, no
40866 diff -urNp linux-2.6.39.4/fs/select.c linux-2.6.39.4/fs/select.c
40867 --- linux-2.6.39.4/fs/select.c 2011-05-19 00:06:34.000000000 -0400
40868 +++ linux-2.6.39.4/fs/select.c 2011-08-05 19:44:37.000000000 -0400
40869 @@ -20,6 +20,7 @@
40870 #include <linux/module.h>
40871 #include <linux/slab.h>
40872 #include <linux/poll.h>
40873 +#include <linux/security.h>
40874 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40875 #include <linux/file.h>
40876 #include <linux/fdtable.h>
40877 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40878 int retval, i, timed_out = 0;
40879 unsigned long slack = 0;
40880
40881 + pax_track_stack();
40882 +
40883 rcu_read_lock();
40884 retval = max_select_fd(n, fds);
40885 rcu_read_unlock();
40886 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40887 /* Allocate small arguments on the stack to save memory and be faster */
40888 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40889
40890 + pax_track_stack();
40891 +
40892 ret = -EINVAL;
40893 if (n < 0)
40894 goto out_nofds;
40895 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40896 struct poll_list *walk = head;
40897 unsigned long todo = nfds;
40898
40899 + pax_track_stack();
40900 +
40901 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40902 if (nfds > rlimit(RLIMIT_NOFILE))
40903 return -EINVAL;
40904
40905 diff -urNp linux-2.6.39.4/fs/seq_file.c linux-2.6.39.4/fs/seq_file.c
40906 --- linux-2.6.39.4/fs/seq_file.c 2011-05-19 00:06:34.000000000 -0400
40907 +++ linux-2.6.39.4/fs/seq_file.c 2011-08-05 20:34:06.000000000 -0400
40908 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40909 return 0;
40910 }
40911 if (!m->buf) {
40912 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40913 + m->size = PAGE_SIZE;
40914 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40915 if (!m->buf)
40916 return -ENOMEM;
40917 }
40918 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40919 Eoverflow:
40920 m->op->stop(m, p);
40921 kfree(m->buf);
40922 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40923 + m->size <<= 1;
40924 + m->buf = kmalloc(m->size, GFP_KERNEL);
40925 return !m->buf ? -ENOMEM : -EAGAIN;
40926 }
40927
40928 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40929 m->version = file->f_version;
40930 /* grab buffer if we didn't have one */
40931 if (!m->buf) {
40932 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40933 + m->size = PAGE_SIZE;
40934 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40935 if (!m->buf)
40936 goto Enomem;
40937 }
40938 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40939 goto Fill;
40940 m->op->stop(m, p);
40941 kfree(m->buf);
40942 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40943 + m->size <<= 1;
40944 + m->buf = kmalloc(m->size, GFP_KERNEL);
40945 if (!m->buf)
40946 goto Enomem;
40947 m->count = 0;
40948 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40949 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40950 void *data)
40951 {
40952 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40953 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40954 int res = -ENOMEM;
40955
40956 if (op) {
40957 diff -urNp linux-2.6.39.4/fs/splice.c linux-2.6.39.4/fs/splice.c
40958 --- linux-2.6.39.4/fs/splice.c 2011-05-19 00:06:34.000000000 -0400
40959 +++ linux-2.6.39.4/fs/splice.c 2011-08-05 19:44:37.000000000 -0400
40960 @@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40961 pipe_lock(pipe);
40962
40963 for (;;) {
40964 - if (!pipe->readers) {
40965 + if (!atomic_read(&pipe->readers)) {
40966 send_sig(SIGPIPE, current, 0);
40967 if (!ret)
40968 ret = -EPIPE;
40969 @@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40970 do_wakeup = 0;
40971 }
40972
40973 - pipe->waiting_writers++;
40974 + atomic_inc(&pipe->waiting_writers);
40975 pipe_wait(pipe);
40976 - pipe->waiting_writers--;
40977 + atomic_dec(&pipe->waiting_writers);
40978 }
40979
40980 pipe_unlock(pipe);
40981 @@ -316,6 +316,8 @@ __generic_file_splice_read(struct file *
40982 .spd_release = spd_release_page,
40983 };
40984
40985 + pax_track_stack();
40986 +
40987 if (splice_grow_spd(pipe, &spd))
40988 return -ENOMEM;
40989
40990 @@ -556,7 +558,7 @@ static ssize_t kernel_readv(struct file
40991 old_fs = get_fs();
40992 set_fs(get_ds());
40993 /* The cast to a user pointer is valid due to the set_fs() */
40994 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40995 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40996 set_fs(old_fs);
40997
40998 return res;
40999 @@ -571,7 +573,7 @@ static ssize_t kernel_write(struct file
41000 old_fs = get_fs();
41001 set_fs(get_ds());
41002 /* The cast to a user pointer is valid due to the set_fs() */
41003 - res = vfs_write(file, (const char __user *)buf, count, &pos);
41004 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
41005 set_fs(old_fs);
41006
41007 return res;
41008 @@ -599,6 +601,8 @@ ssize_t default_file_splice_read(struct
41009 .spd_release = spd_release_page,
41010 };
41011
41012 + pax_track_stack();
41013 +
41014 if (splice_grow_spd(pipe, &spd))
41015 return -ENOMEM;
41016
41017 @@ -622,7 +626,7 @@ ssize_t default_file_splice_read(struct
41018 goto err;
41019
41020 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
41021 - vec[i].iov_base = (void __user *) page_address(page);
41022 + vec[i].iov_base = (__force void __user *) page_address(page);
41023 vec[i].iov_len = this_len;
41024 spd.pages[i] = page;
41025 spd.nr_pages++;
41026 @@ -842,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
41027 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
41028 {
41029 while (!pipe->nrbufs) {
41030 - if (!pipe->writers)
41031 + if (!atomic_read(&pipe->writers))
41032 return 0;
41033
41034 - if (!pipe->waiting_writers && sd->num_spliced)
41035 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
41036 return 0;
41037
41038 if (sd->flags & SPLICE_F_NONBLOCK)
41039 @@ -1178,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct fi
41040 * out of the pipe right after the splice_to_pipe(). So set
41041 * PIPE_READERS appropriately.
41042 */
41043 - pipe->readers = 1;
41044 + atomic_set(&pipe->readers, 1);
41045
41046 current->splice_pipe = pipe;
41047 }
41048 @@ -1615,6 +1619,8 @@ static long vmsplice_to_pipe(struct file
41049 };
41050 long ret;
41051
41052 + pax_track_stack();
41053 +
41054 pipe = get_pipe_info(file);
41055 if (!pipe)
41056 return -EBADF;
41057 @@ -1730,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_
41058 ret = -ERESTARTSYS;
41059 break;
41060 }
41061 - if (!pipe->writers)
41062 + if (!atomic_read(&pipe->writers))
41063 break;
41064 - if (!pipe->waiting_writers) {
41065 + if (!atomic_read(&pipe->waiting_writers)) {
41066 if (flags & SPLICE_F_NONBLOCK) {
41067 ret = -EAGAIN;
41068 break;
41069 @@ -1764,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_
41070 pipe_lock(pipe);
41071
41072 while (pipe->nrbufs >= pipe->buffers) {
41073 - if (!pipe->readers) {
41074 + if (!atomic_read(&pipe->readers)) {
41075 send_sig(SIGPIPE, current, 0);
41076 ret = -EPIPE;
41077 break;
41078 @@ -1777,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_
41079 ret = -ERESTARTSYS;
41080 break;
41081 }
41082 - pipe->waiting_writers++;
41083 + atomic_inc(&pipe->waiting_writers);
41084 pipe_wait(pipe);
41085 - pipe->waiting_writers--;
41086 + atomic_dec(&pipe->waiting_writers);
41087 }
41088
41089 pipe_unlock(pipe);
41090 @@ -1815,14 +1821,14 @@ retry:
41091 pipe_double_lock(ipipe, opipe);
41092
41093 do {
41094 - if (!opipe->readers) {
41095 + if (!atomic_read(&opipe->readers)) {
41096 send_sig(SIGPIPE, current, 0);
41097 if (!ret)
41098 ret = -EPIPE;
41099 break;
41100 }
41101
41102 - if (!ipipe->nrbufs && !ipipe->writers)
41103 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
41104 break;
41105
41106 /*
41107 @@ -1922,7 +1928,7 @@ static int link_pipe(struct pipe_inode_i
41108 pipe_double_lock(ipipe, opipe);
41109
41110 do {
41111 - if (!opipe->readers) {
41112 + if (!atomic_read(&opipe->readers)) {
41113 send_sig(SIGPIPE, current, 0);
41114 if (!ret)
41115 ret = -EPIPE;
41116 @@ -1967,7 +1973,7 @@ static int link_pipe(struct pipe_inode_i
41117 * return EAGAIN if we have the potential of some data in the
41118 * future, otherwise just return 0
41119 */
41120 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
41121 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
41122 ret = -EAGAIN;
41123
41124 pipe_unlock(ipipe);
41125 diff -urNp linux-2.6.39.4/fs/sysfs/file.c linux-2.6.39.4/fs/sysfs/file.c
41126 --- linux-2.6.39.4/fs/sysfs/file.c 2011-05-19 00:06:34.000000000 -0400
41127 +++ linux-2.6.39.4/fs/sysfs/file.c 2011-08-05 19:44:37.000000000 -0400
41128 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41129
41130 struct sysfs_open_dirent {
41131 atomic_t refcnt;
41132 - atomic_t event;
41133 + atomic_unchecked_t event;
41134 wait_queue_head_t poll;
41135 struct list_head buffers; /* goes through sysfs_buffer.list */
41136 };
41137 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
41138 if (!sysfs_get_active(attr_sd))
41139 return -ENODEV;
41140
41141 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41142 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41143 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41144
41145 sysfs_put_active(attr_sd);
41146 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
41147 return -ENOMEM;
41148
41149 atomic_set(&new_od->refcnt, 0);
41150 - atomic_set(&new_od->event, 1);
41151 + atomic_set_unchecked(&new_od->event, 1);
41152 init_waitqueue_head(&new_od->poll);
41153 INIT_LIST_HEAD(&new_od->buffers);
41154 goto retry;
41155 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
41156
41157 sysfs_put_active(attr_sd);
41158
41159 - if (buffer->event != atomic_read(&od->event))
41160 + if (buffer->event != atomic_read_unchecked(&od->event))
41161 goto trigger;
41162
41163 return DEFAULT_POLLMASK;
41164 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
41165
41166 od = sd->s_attr.open;
41167 if (od) {
41168 - atomic_inc(&od->event);
41169 + atomic_inc_unchecked(&od->event);
41170 wake_up_interruptible(&od->poll);
41171 }
41172
41173 diff -urNp linux-2.6.39.4/fs/sysfs/mount.c linux-2.6.39.4/fs/sysfs/mount.c
41174 --- linux-2.6.39.4/fs/sysfs/mount.c 2011-05-19 00:06:34.000000000 -0400
41175 +++ linux-2.6.39.4/fs/sysfs/mount.c 2011-08-05 19:44:37.000000000 -0400
41176 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41177 .s_name = "",
41178 .s_count = ATOMIC_INIT(1),
41179 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41180 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41181 + .s_mode = S_IFDIR | S_IRWXU,
41182 +#else
41183 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41184 +#endif
41185 .s_ino = 1,
41186 };
41187
41188 diff -urNp linux-2.6.39.4/fs/sysfs/symlink.c linux-2.6.39.4/fs/sysfs/symlink.c
41189 --- linux-2.6.39.4/fs/sysfs/symlink.c 2011-05-19 00:06:34.000000000 -0400
41190 +++ linux-2.6.39.4/fs/sysfs/symlink.c 2011-08-05 19:44:37.000000000 -0400
41191 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41192
41193 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41194 {
41195 - char *page = nd_get_link(nd);
41196 + const char *page = nd_get_link(nd);
41197 if (!IS_ERR(page))
41198 free_page((unsigned long)page);
41199 }
41200 diff -urNp linux-2.6.39.4/fs/udf/inode.c linux-2.6.39.4/fs/udf/inode.c
41201 --- linux-2.6.39.4/fs/udf/inode.c 2011-05-19 00:06:34.000000000 -0400
41202 +++ linux-2.6.39.4/fs/udf/inode.c 2011-08-05 19:44:37.000000000 -0400
41203 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41204 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41205 int lastblock = 0;
41206
41207 + pax_track_stack();
41208 +
41209 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41210 prev_epos.block = iinfo->i_location;
41211 prev_epos.bh = NULL;
41212 diff -urNp linux-2.6.39.4/fs/udf/misc.c linux-2.6.39.4/fs/udf/misc.c
41213 --- linux-2.6.39.4/fs/udf/misc.c 2011-05-19 00:06:34.000000000 -0400
41214 +++ linux-2.6.39.4/fs/udf/misc.c 2011-08-05 19:44:37.000000000 -0400
41215 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41216
41217 u8 udf_tag_checksum(const struct tag *t)
41218 {
41219 - u8 *data = (u8 *)t;
41220 + const u8 *data = (const u8 *)t;
41221 u8 checksum = 0;
41222 int i;
41223 for (i = 0; i < sizeof(struct tag); ++i)
41224 diff -urNp linux-2.6.39.4/fs/utimes.c linux-2.6.39.4/fs/utimes.c
41225 --- linux-2.6.39.4/fs/utimes.c 2011-05-19 00:06:34.000000000 -0400
41226 +++ linux-2.6.39.4/fs/utimes.c 2011-08-05 19:44:37.000000000 -0400
41227 @@ -1,6 +1,7 @@
41228 #include <linux/compiler.h>
41229 #include <linux/file.h>
41230 #include <linux/fs.h>
41231 +#include <linux/security.h>
41232 #include <linux/linkage.h>
41233 #include <linux/mount.h>
41234 #include <linux/namei.h>
41235 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41236 goto mnt_drop_write_and_out;
41237 }
41238 }
41239 +
41240 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41241 + error = -EACCES;
41242 + goto mnt_drop_write_and_out;
41243 + }
41244 +
41245 mutex_lock(&inode->i_mutex);
41246 error = notify_change(path->dentry, &newattrs);
41247 mutex_unlock(&inode->i_mutex);
41248 diff -urNp linux-2.6.39.4/fs/xattr_acl.c linux-2.6.39.4/fs/xattr_acl.c
41249 --- linux-2.6.39.4/fs/xattr_acl.c 2011-05-19 00:06:34.000000000 -0400
41250 +++ linux-2.6.39.4/fs/xattr_acl.c 2011-08-05 19:44:37.000000000 -0400
41251 @@ -17,8 +17,8 @@
41252 struct posix_acl *
41253 posix_acl_from_xattr(const void *value, size_t size)
41254 {
41255 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41256 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41257 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41258 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41259 int count;
41260 struct posix_acl *acl;
41261 struct posix_acl_entry *acl_e;
41262 diff -urNp linux-2.6.39.4/fs/xattr.c linux-2.6.39.4/fs/xattr.c
41263 --- linux-2.6.39.4/fs/xattr.c 2011-05-19 00:06:34.000000000 -0400
41264 +++ linux-2.6.39.4/fs/xattr.c 2011-08-05 19:44:37.000000000 -0400
41265 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41266 * Extended attribute SET operations
41267 */
41268 static long
41269 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
41270 +setxattr(struct path *path, const char __user *name, const void __user *value,
41271 size_t size, int flags)
41272 {
41273 int error;
41274 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
41275 return PTR_ERR(kvalue);
41276 }
41277
41278 - error = vfs_setxattr(d, kname, kvalue, size, flags);
41279 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41280 + error = -EACCES;
41281 + goto out;
41282 + }
41283 +
41284 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41285 +out:
41286 kfree(kvalue);
41287 return error;
41288 }
41289 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41290 return error;
41291 error = mnt_want_write(path.mnt);
41292 if (!error) {
41293 - error = setxattr(path.dentry, name, value, size, flags);
41294 + error = setxattr(&path, name, value, size, flags);
41295 mnt_drop_write(path.mnt);
41296 }
41297 path_put(&path);
41298 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41299 return error;
41300 error = mnt_want_write(path.mnt);
41301 if (!error) {
41302 - error = setxattr(path.dentry, name, value, size, flags);
41303 + error = setxattr(&path, name, value, size, flags);
41304 mnt_drop_write(path.mnt);
41305 }
41306 path_put(&path);
41307 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41308 const void __user *,value, size_t, size, int, flags)
41309 {
41310 struct file *f;
41311 - struct dentry *dentry;
41312 int error = -EBADF;
41313
41314 f = fget(fd);
41315 if (!f)
41316 return error;
41317 - dentry = f->f_path.dentry;
41318 - audit_inode(NULL, dentry);
41319 + audit_inode(NULL, f->f_path.dentry);
41320 error = mnt_want_write_file(f);
41321 if (!error) {
41322 - error = setxattr(dentry, name, value, size, flags);
41323 + error = setxattr(&f->f_path, name, value, size, flags);
41324 mnt_drop_write(f->f_path.mnt);
41325 }
41326 fput(f);
41327 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c
41328 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-19 00:06:34.000000000 -0400
41329 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-05 19:44:37.000000000 -0400
41330 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41331 xfs_fsop_geom_t fsgeo;
41332 int error;
41333
41334 + memset(&fsgeo, 0, sizeof(fsgeo));
41335 error = xfs_fs_geometry(mp, &fsgeo, 3);
41336 if (error)
41337 return -error;
41338 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c
41339 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-19 00:06:34.000000000 -0400
41340 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-05 19:44:37.000000000 -0400
41341 @@ -128,7 +128,7 @@ xfs_find_handle(
41342 }
41343
41344 error = -EFAULT;
41345 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41346 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41347 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41348 goto out_put;
41349
41350 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c
41351 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-05-19 00:06:34.000000000 -0400
41352 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-05 19:44:37.000000000 -0400
41353 @@ -437,7 +437,7 @@ xfs_vn_put_link(
41354 struct nameidata *nd,
41355 void *p)
41356 {
41357 - char *s = nd_get_link(nd);
41358 + const char *s = nd_get_link(nd);
41359
41360 if (!IS_ERR(s))
41361 kfree(s);
41362 diff -urNp linux-2.6.39.4/fs/xfs/xfs_bmap.c linux-2.6.39.4/fs/xfs/xfs_bmap.c
41363 --- linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-05-19 00:06:34.000000000 -0400
41364 +++ linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-08-05 19:44:37.000000000 -0400
41365 @@ -287,7 +287,7 @@ xfs_bmap_validate_ret(
41366 int nmap,
41367 int ret_nmap);
41368 #else
41369 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41370 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41371 #endif /* DEBUG */
41372
41373 STATIC int
41374 diff -urNp linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c
41375 --- linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-05-19 00:06:34.000000000 -0400
41376 +++ linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-08-05 19:44:37.000000000 -0400
41377 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41378 }
41379
41380 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41381 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41382 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41383 + char name[sfep->namelen];
41384 + memcpy(name, sfep->name, sfep->namelen);
41385 + if (filldir(dirent, name, sfep->namelen,
41386 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
41387 + *offset = off & 0x7fffffff;
41388 + return 0;
41389 + }
41390 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41391 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41392 *offset = off & 0x7fffffff;
41393 return 0;
41394 diff -urNp linux-2.6.39.4/grsecurity/gracl_alloc.c linux-2.6.39.4/grsecurity/gracl_alloc.c
41395 --- linux-2.6.39.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41396 +++ linux-2.6.39.4/grsecurity/gracl_alloc.c 2011-08-05 19:44:37.000000000 -0400
41397 @@ -0,0 +1,105 @@
41398 +#include <linux/kernel.h>
41399 +#include <linux/mm.h>
41400 +#include <linux/slab.h>
41401 +#include <linux/vmalloc.h>
41402 +#include <linux/gracl.h>
41403 +#include <linux/grsecurity.h>
41404 +
41405 +static unsigned long alloc_stack_next = 1;
41406 +static unsigned long alloc_stack_size = 1;
41407 +static void **alloc_stack;
41408 +
41409 +static __inline__ int
41410 +alloc_pop(void)
41411 +{
41412 + if (alloc_stack_next == 1)
41413 + return 0;
41414 +
41415 + kfree(alloc_stack[alloc_stack_next - 2]);
41416 +
41417 + alloc_stack_next--;
41418 +
41419 + return 1;
41420 +}
41421 +
41422 +static __inline__ int
41423 +alloc_push(void *buf)
41424 +{
41425 + if (alloc_stack_next >= alloc_stack_size)
41426 + return 1;
41427 +
41428 + alloc_stack[alloc_stack_next - 1] = buf;
41429 +
41430 + alloc_stack_next++;
41431 +
41432 + return 0;
41433 +}
41434 +
41435 +void *
41436 +acl_alloc(unsigned long len)
41437 +{
41438 + void *ret = NULL;
41439 +
41440 + if (!len || len > PAGE_SIZE)
41441 + goto out;
41442 +
41443 + ret = kmalloc(len, GFP_KERNEL);
41444 +
41445 + if (ret) {
41446 + if (alloc_push(ret)) {
41447 + kfree(ret);
41448 + ret = NULL;
41449 + }
41450 + }
41451 +
41452 +out:
41453 + return ret;
41454 +}
41455 +
41456 +void *
41457 +acl_alloc_num(unsigned long num, unsigned long len)
41458 +{
41459 + if (!len || (num > (PAGE_SIZE / len)))
41460 + return NULL;
41461 +
41462 + return acl_alloc(num * len);
41463 +}
41464 +
41465 +void
41466 +acl_free_all(void)
41467 +{
41468 + if (gr_acl_is_enabled() || !alloc_stack)
41469 + return;
41470 +
41471 + while (alloc_pop()) ;
41472 +
41473 + if (alloc_stack) {
41474 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41475 + kfree(alloc_stack);
41476 + else
41477 + vfree(alloc_stack);
41478 + }
41479 +
41480 + alloc_stack = NULL;
41481 + alloc_stack_size = 1;
41482 + alloc_stack_next = 1;
41483 +
41484 + return;
41485 +}
41486 +
41487 +int
41488 +acl_alloc_stack_init(unsigned long size)
41489 +{
41490 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41491 + alloc_stack =
41492 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41493 + else
41494 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41495 +
41496 + alloc_stack_size = size;
41497 +
41498 + if (!alloc_stack)
41499 + return 0;
41500 + else
41501 + return 1;
41502 +}
41503 diff -urNp linux-2.6.39.4/grsecurity/gracl.c linux-2.6.39.4/grsecurity/gracl.c
41504 --- linux-2.6.39.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41505 +++ linux-2.6.39.4/grsecurity/gracl.c 2011-08-05 19:44:37.000000000 -0400
41506 @@ -0,0 +1,4106 @@
41507 +#include <linux/kernel.h>
41508 +#include <linux/module.h>
41509 +#include <linux/sched.h>
41510 +#include <linux/mm.h>
41511 +#include <linux/file.h>
41512 +#include <linux/fs.h>
41513 +#include <linux/namei.h>
41514 +#include <linux/mount.h>
41515 +#include <linux/tty.h>
41516 +#include <linux/proc_fs.h>
41517 +#include <linux/lglock.h>
41518 +#include <linux/slab.h>
41519 +#include <linux/vmalloc.h>
41520 +#include <linux/types.h>
41521 +#include <linux/sysctl.h>
41522 +#include <linux/netdevice.h>
41523 +#include <linux/ptrace.h>
41524 +#include <linux/gracl.h>
41525 +#include <linux/gralloc.h>
41526 +#include <linux/grsecurity.h>
41527 +#include <linux/grinternal.h>
41528 +#include <linux/pid_namespace.h>
41529 +#include <linux/fdtable.h>
41530 +#include <linux/percpu.h>
41531 +
41532 +#include <asm/uaccess.h>
41533 +#include <asm/errno.h>
41534 +#include <asm/mman.h>
41535 +
41536 +static struct acl_role_db acl_role_set;
41537 +static struct name_db name_set;
41538 +static struct inodev_db inodev_set;
41539 +
41540 +/* for keeping track of userspace pointers used for subjects, so we
41541 + can share references in the kernel as well
41542 +*/
41543 +
41544 +static struct path real_root;
41545 +
41546 +static struct acl_subj_map_db subj_map_set;
41547 +
41548 +static struct acl_role_label *default_role;
41549 +
41550 +static struct acl_role_label *role_list;
41551 +
41552 +static u16 acl_sp_role_value;
41553 +
41554 +extern char *gr_shared_page[4];
41555 +static DEFINE_MUTEX(gr_dev_mutex);
41556 +DEFINE_RWLOCK(gr_inode_lock);
41557 +
41558 +struct gr_arg *gr_usermode;
41559 +
41560 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41561 +
41562 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41563 +extern void gr_clear_learn_entries(void);
41564 +
41565 +#ifdef CONFIG_GRKERNSEC_RESLOG
41566 +extern void gr_log_resource(const struct task_struct *task,
41567 + const int res, const unsigned long wanted, const int gt);
41568 +#endif
41569 +
41570 +unsigned char *gr_system_salt;
41571 +unsigned char *gr_system_sum;
41572 +
41573 +static struct sprole_pw **acl_special_roles = NULL;
41574 +static __u16 num_sprole_pws = 0;
41575 +
41576 +static struct acl_role_label *kernel_role = NULL;
41577 +
41578 +static unsigned int gr_auth_attempts = 0;
41579 +static unsigned long gr_auth_expires = 0UL;
41580 +
41581 +#ifdef CONFIG_NET
41582 +extern struct vfsmount *sock_mnt;
41583 +#endif
41584 +
41585 +extern struct vfsmount *pipe_mnt;
41586 +extern struct vfsmount *shm_mnt;
41587 +#ifdef CONFIG_HUGETLBFS
41588 +extern struct vfsmount *hugetlbfs_vfsmount;
41589 +#endif
41590 +
41591 +static struct acl_object_label *fakefs_obj_rw;
41592 +static struct acl_object_label *fakefs_obj_rwx;
41593 +
41594 +extern int gr_init_uidset(void);
41595 +extern void gr_free_uidset(void);
41596 +extern void gr_remove_uid(uid_t uid);
41597 +extern int gr_find_uid(uid_t uid);
41598 +
41599 +DECLARE_BRLOCK(vfsmount_lock);
41600 +
41601 +__inline__ int
41602 +gr_acl_is_enabled(void)
41603 +{
41604 + return (gr_status & GR_READY);
41605 +}
41606 +
41607 +#ifdef CONFIG_BTRFS_FS
41608 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41609 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41610 +#endif
41611 +
41612 +static inline dev_t __get_dev(const struct dentry *dentry)
41613 +{
41614 +#ifdef CONFIG_BTRFS_FS
41615 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41616 + return get_btrfs_dev_from_inode(dentry->d_inode);
41617 + else
41618 +#endif
41619 + return dentry->d_inode->i_sb->s_dev;
41620 +}
41621 +
41622 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41623 +{
41624 + return __get_dev(dentry);
41625 +}
41626 +
41627 +static char gr_task_roletype_to_char(struct task_struct *task)
41628 +{
41629 + switch (task->role->roletype &
41630 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41631 + GR_ROLE_SPECIAL)) {
41632 + case GR_ROLE_DEFAULT:
41633 + return 'D';
41634 + case GR_ROLE_USER:
41635 + return 'U';
41636 + case GR_ROLE_GROUP:
41637 + return 'G';
41638 + case GR_ROLE_SPECIAL:
41639 + return 'S';
41640 + }
41641 +
41642 + return 'X';
41643 +}
41644 +
41645 +char gr_roletype_to_char(void)
41646 +{
41647 + return gr_task_roletype_to_char(current);
41648 +}
41649 +
41650 +__inline__ int
41651 +gr_acl_tpe_check(void)
41652 +{
41653 + if (unlikely(!(gr_status & GR_READY)))
41654 + return 0;
41655 + if (current->role->roletype & GR_ROLE_TPE)
41656 + return 1;
41657 + else
41658 + return 0;
41659 +}
41660 +
41661 +int
41662 +gr_handle_rawio(const struct inode *inode)
41663 +{
41664 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41665 + if (inode && S_ISBLK(inode->i_mode) &&
41666 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41667 + !capable(CAP_SYS_RAWIO))
41668 + return 1;
41669 +#endif
41670 + return 0;
41671 +}
41672 +
41673 +static int
41674 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41675 +{
41676 + if (likely(lena != lenb))
41677 + return 0;
41678 +
41679 + return !memcmp(a, b, lena);
41680 +}
41681 +
41682 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41683 +{
41684 + *buflen -= namelen;
41685 + if (*buflen < 0)
41686 + return -ENAMETOOLONG;
41687 + *buffer -= namelen;
41688 + memcpy(*buffer, str, namelen);
41689 + return 0;
41690 +}
41691 +
41692 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41693 +{
41694 + return prepend(buffer, buflen, name->name, name->len);
41695 +}
41696 +
41697 +static int prepend_path(const struct path *path, struct path *root,
41698 + char **buffer, int *buflen)
41699 +{
41700 + struct dentry *dentry = path->dentry;
41701 + struct vfsmount *vfsmnt = path->mnt;
41702 + bool slash = false;
41703 + int error = 0;
41704 +
41705 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41706 + struct dentry * parent;
41707 +
41708 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41709 + /* Global root? */
41710 + if (vfsmnt->mnt_parent == vfsmnt) {
41711 + goto out;
41712 + }
41713 + dentry = vfsmnt->mnt_mountpoint;
41714 + vfsmnt = vfsmnt->mnt_parent;
41715 + continue;
41716 + }
41717 + parent = dentry->d_parent;
41718 + prefetch(parent);
41719 + spin_lock(&dentry->d_lock);
41720 + error = prepend_name(buffer, buflen, &dentry->d_name);
41721 + spin_unlock(&dentry->d_lock);
41722 + if (!error)
41723 + error = prepend(buffer, buflen, "/", 1);
41724 + if (error)
41725 + break;
41726 +
41727 + slash = true;
41728 + dentry = parent;
41729 + }
41730 +
41731 +out:
41732 + if (!error && !slash)
41733 + error = prepend(buffer, buflen, "/", 1);
41734 +
41735 + return error;
41736 +}
41737 +
41738 +/* this must be called with vfsmount_lock and rename_lock held */
41739 +
41740 +static char *__our_d_path(const struct path *path, struct path *root,
41741 + char *buf, int buflen)
41742 +{
41743 + char *res = buf + buflen;
41744 + int error;
41745 +
41746 + prepend(&res, &buflen, "\0", 1);
41747 + error = prepend_path(path, root, &res, &buflen);
41748 + if (error)
41749 + return ERR_PTR(error);
41750 +
41751 + return res;
41752 +}
41753 +
41754 +static char *
41755 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41756 +{
41757 + char *retval;
41758 +
41759 + retval = __our_d_path(path, root, buf, buflen);
41760 + if (unlikely(IS_ERR(retval)))
41761 + retval = strcpy(buf, "<path too long>");
41762 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41763 + retval[1] = '\0';
41764 +
41765 + return retval;
41766 +}
41767 +
41768 +static char *
41769 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41770 + char *buf, int buflen)
41771 +{
41772 + struct path path;
41773 + char *res;
41774 +
41775 + path.dentry = (struct dentry *)dentry;
41776 + path.mnt = (struct vfsmount *)vfsmnt;
41777 +
41778 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41779 + by the RBAC system */
41780 + res = gen_full_path(&path, &real_root, buf, buflen);
41781 +
41782 + return res;
41783 +}
41784 +
41785 +static char *
41786 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41787 + char *buf, int buflen)
41788 +{
41789 + char *res;
41790 + struct path path;
41791 + struct path root;
41792 + struct task_struct *reaper = &init_task;
41793 +
41794 + path.dentry = (struct dentry *)dentry;
41795 + path.mnt = (struct vfsmount *)vfsmnt;
41796 +
41797 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41798 + get_fs_root(reaper->fs, &root);
41799 +
41800 + write_seqlock(&rename_lock);
41801 + br_read_lock(vfsmount_lock);
41802 + res = gen_full_path(&path, &root, buf, buflen);
41803 + br_read_unlock(vfsmount_lock);
41804 + write_sequnlock(&rename_lock);
41805 +
41806 + path_put(&root);
41807 + return res;
41808 +}
41809 +
41810 +static char *
41811 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41812 +{
41813 + char *ret;
41814 + write_seqlock(&rename_lock);
41815 + br_read_lock(vfsmount_lock);
41816 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41817 + PAGE_SIZE);
41818 + br_read_unlock(vfsmount_lock);
41819 + write_sequnlock(&rename_lock);
41820 + return ret;
41821 +}
41822 +
41823 +char *
41824 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41825 +{
41826 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41827 + PAGE_SIZE);
41828 +}
41829 +
41830 +char *
41831 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41832 +{
41833 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41834 + PAGE_SIZE);
41835 +}
41836 +
41837 +char *
41838 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41839 +{
41840 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41841 + PAGE_SIZE);
41842 +}
41843 +
41844 +char *
41845 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41846 +{
41847 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41848 + PAGE_SIZE);
41849 +}
41850 +
41851 +char *
41852 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41853 +{
41854 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41855 + PAGE_SIZE);
41856 +}
41857 +
41858 +__inline__ __u32
41859 +to_gr_audit(const __u32 reqmode)
41860 +{
41861 + /* masks off auditable permission flags, then shifts them to create
41862 + auditing flags, and adds the special case of append auditing if
41863 + we're requesting write */
41864 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41865 +}
41866 +
41867 +struct acl_subject_label *
41868 +lookup_subject_map(const struct acl_subject_label *userp)
41869 +{
41870 + unsigned int index = shash(userp, subj_map_set.s_size);
41871 + struct subject_map *match;
41872 +
41873 + match = subj_map_set.s_hash[index];
41874 +
41875 + while (match && match->user != userp)
41876 + match = match->next;
41877 +
41878 + if (match != NULL)
41879 + return match->kernel;
41880 + else
41881 + return NULL;
41882 +}
41883 +
41884 +static void
41885 +insert_subj_map_entry(struct subject_map *subjmap)
41886 +{
41887 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41888 + struct subject_map **curr;
41889 +
41890 + subjmap->prev = NULL;
41891 +
41892 + curr = &subj_map_set.s_hash[index];
41893 + if (*curr != NULL)
41894 + (*curr)->prev = subjmap;
41895 +
41896 + subjmap->next = *curr;
41897 + *curr = subjmap;
41898 +
41899 + return;
41900 +}
41901 +
41902 +static struct acl_role_label *
41903 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41904 + const gid_t gid)
41905 +{
41906 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41907 + struct acl_role_label *match;
41908 + struct role_allowed_ip *ipp;
41909 + unsigned int x;
41910 + u32 curr_ip = task->signal->curr_ip;
41911 +
41912 + task->signal->saved_ip = curr_ip;
41913 +
41914 + match = acl_role_set.r_hash[index];
41915 +
41916 + while (match) {
41917 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41918 + for (x = 0; x < match->domain_child_num; x++) {
41919 + if (match->domain_children[x] == uid)
41920 + goto found;
41921 + }
41922 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41923 + break;
41924 + match = match->next;
41925 + }
41926 +found:
41927 + if (match == NULL) {
41928 + try_group:
41929 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41930 + match = acl_role_set.r_hash[index];
41931 +
41932 + while (match) {
41933 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41934 + for (x = 0; x < match->domain_child_num; x++) {
41935 + if (match->domain_children[x] == gid)
41936 + goto found2;
41937 + }
41938 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41939 + break;
41940 + match = match->next;
41941 + }
41942 +found2:
41943 + if (match == NULL)
41944 + match = default_role;
41945 + if (match->allowed_ips == NULL)
41946 + return match;
41947 + else {
41948 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41949 + if (likely
41950 + ((ntohl(curr_ip) & ipp->netmask) ==
41951 + (ntohl(ipp->addr) & ipp->netmask)))
41952 + return match;
41953 + }
41954 + match = default_role;
41955 + }
41956 + } else if (match->allowed_ips == NULL) {
41957 + return match;
41958 + } else {
41959 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41960 + if (likely
41961 + ((ntohl(curr_ip) & ipp->netmask) ==
41962 + (ntohl(ipp->addr) & ipp->netmask)))
41963 + return match;
41964 + }
41965 + goto try_group;
41966 + }
41967 +
41968 + return match;
41969 +}
41970 +
41971 +struct acl_subject_label *
41972 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41973 + const struct acl_role_label *role)
41974 +{
41975 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41976 + struct acl_subject_label *match;
41977 +
41978 + match = role->subj_hash[index];
41979 +
41980 + while (match && (match->inode != ino || match->device != dev ||
41981 + (match->mode & GR_DELETED))) {
41982 + match = match->next;
41983 + }
41984 +
41985 + if (match && !(match->mode & GR_DELETED))
41986 + return match;
41987 + else
41988 + return NULL;
41989 +}
41990 +
41991 +struct acl_subject_label *
41992 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41993 + const struct acl_role_label *role)
41994 +{
41995 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41996 + struct acl_subject_label *match;
41997 +
41998 + match = role->subj_hash[index];
41999 +
42000 + while (match && (match->inode != ino || match->device != dev ||
42001 + !(match->mode & GR_DELETED))) {
42002 + match = match->next;
42003 + }
42004 +
42005 + if (match && (match->mode & GR_DELETED))
42006 + return match;
42007 + else
42008 + return NULL;
42009 +}
42010 +
42011 +static struct acl_object_label *
42012 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
42013 + const struct acl_subject_label *subj)
42014 +{
42015 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
42016 + struct acl_object_label *match;
42017 +
42018 + match = subj->obj_hash[index];
42019 +
42020 + while (match && (match->inode != ino || match->device != dev ||
42021 + (match->mode & GR_DELETED))) {
42022 + match = match->next;
42023 + }
42024 +
42025 + if (match && !(match->mode & GR_DELETED))
42026 + return match;
42027 + else
42028 + return NULL;
42029 +}
42030 +
42031 +static struct acl_object_label *
42032 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
42033 + const struct acl_subject_label *subj)
42034 +{
42035 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
42036 + struct acl_object_label *match;
42037 +
42038 + match = subj->obj_hash[index];
42039 +
42040 + while (match && (match->inode != ino || match->device != dev ||
42041 + !(match->mode & GR_DELETED))) {
42042 + match = match->next;
42043 + }
42044 +
42045 + if (match && (match->mode & GR_DELETED))
42046 + return match;
42047 +
42048 + match = subj->obj_hash[index];
42049 +
42050 + while (match && (match->inode != ino || match->device != dev ||
42051 + (match->mode & GR_DELETED))) {
42052 + match = match->next;
42053 + }
42054 +
42055 + if (match && !(match->mode & GR_DELETED))
42056 + return match;
42057 + else
42058 + return NULL;
42059 +}
42060 +
42061 +static struct name_entry *
42062 +lookup_name_entry(const char *name)
42063 +{
42064 + unsigned int len = strlen(name);
42065 + unsigned int key = full_name_hash(name, len);
42066 + unsigned int index = key % name_set.n_size;
42067 + struct name_entry *match;
42068 +
42069 + match = name_set.n_hash[index];
42070 +
42071 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
42072 + match = match->next;
42073 +
42074 + return match;
42075 +}
42076 +
42077 +static struct name_entry *
42078 +lookup_name_entry_create(const char *name)
42079 +{
42080 + unsigned int len = strlen(name);
42081 + unsigned int key = full_name_hash(name, len);
42082 + unsigned int index = key % name_set.n_size;
42083 + struct name_entry *match;
42084 +
42085 + match = name_set.n_hash[index];
42086 +
42087 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42088 + !match->deleted))
42089 + match = match->next;
42090 +
42091 + if (match && match->deleted)
42092 + return match;
42093 +
42094 + match = name_set.n_hash[index];
42095 +
42096 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42097 + match->deleted))
42098 + match = match->next;
42099 +
42100 + if (match && !match->deleted)
42101 + return match;
42102 + else
42103 + return NULL;
42104 +}
42105 +
42106 +static struct inodev_entry *
42107 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
42108 +{
42109 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
42110 + struct inodev_entry *match;
42111 +
42112 + match = inodev_set.i_hash[index];
42113 +
42114 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
42115 + match = match->next;
42116 +
42117 + return match;
42118 +}
42119 +
42120 +static void
42121 +insert_inodev_entry(struct inodev_entry *entry)
42122 +{
42123 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42124 + inodev_set.i_size);
42125 + struct inodev_entry **curr;
42126 +
42127 + entry->prev = NULL;
42128 +
42129 + curr = &inodev_set.i_hash[index];
42130 + if (*curr != NULL)
42131 + (*curr)->prev = entry;
42132 +
42133 + entry->next = *curr;
42134 + *curr = entry;
42135 +
42136 + return;
42137 +}
42138 +
42139 +static void
42140 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42141 +{
42142 + unsigned int index =
42143 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42144 + struct acl_role_label **curr;
42145 + struct acl_role_label *tmp;
42146 +
42147 + curr = &acl_role_set.r_hash[index];
42148 +
42149 + /* if role was already inserted due to domains and already has
42150 + a role in the same bucket as it attached, then we need to
42151 + combine these two buckets
42152 + */
42153 + if (role->next) {
42154 + tmp = role->next;
42155 + while (tmp->next)
42156 + tmp = tmp->next;
42157 + tmp->next = *curr;
42158 + } else
42159 + role->next = *curr;
42160 + *curr = role;
42161 +
42162 + return;
42163 +}
42164 +
42165 +static void
42166 +insert_acl_role_label(struct acl_role_label *role)
42167 +{
42168 + int i;
42169 +
42170 + if (role_list == NULL) {
42171 + role_list = role;
42172 + role->prev = NULL;
42173 + } else {
42174 + role->prev = role_list;
42175 + role_list = role;
42176 + }
42177 +
42178 + /* used for hash chains */
42179 + role->next = NULL;
42180 +
42181 + if (role->roletype & GR_ROLE_DOMAIN) {
42182 + for (i = 0; i < role->domain_child_num; i++)
42183 + __insert_acl_role_label(role, role->domain_children[i]);
42184 + } else
42185 + __insert_acl_role_label(role, role->uidgid);
42186 +}
42187 +
42188 +static int
42189 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42190 +{
42191 + struct name_entry **curr, *nentry;
42192 + struct inodev_entry *ientry;
42193 + unsigned int len = strlen(name);
42194 + unsigned int key = full_name_hash(name, len);
42195 + unsigned int index = key % name_set.n_size;
42196 +
42197 + curr = &name_set.n_hash[index];
42198 +
42199 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42200 + curr = &((*curr)->next);
42201 +
42202 + if (*curr != NULL)
42203 + return 1;
42204 +
42205 + nentry = acl_alloc(sizeof (struct name_entry));
42206 + if (nentry == NULL)
42207 + return 0;
42208 + ientry = acl_alloc(sizeof (struct inodev_entry));
42209 + if (ientry == NULL)
42210 + return 0;
42211 + ientry->nentry = nentry;
42212 +
42213 + nentry->key = key;
42214 + nentry->name = name;
42215 + nentry->inode = inode;
42216 + nentry->device = device;
42217 + nentry->len = len;
42218 + nentry->deleted = deleted;
42219 +
42220 + nentry->prev = NULL;
42221 + curr = &name_set.n_hash[index];
42222 + if (*curr != NULL)
42223 + (*curr)->prev = nentry;
42224 + nentry->next = *curr;
42225 + *curr = nentry;
42226 +
42227 + /* insert us into the table searchable by inode/dev */
42228 + insert_inodev_entry(ientry);
42229 +
42230 + return 1;
42231 +}
42232 +
42233 +static void
42234 +insert_acl_obj_label(struct acl_object_label *obj,
42235 + struct acl_subject_label *subj)
42236 +{
42237 + unsigned int index =
42238 + fhash(obj->inode, obj->device, subj->obj_hash_size);
42239 + struct acl_object_label **curr;
42240 +
42241 +
42242 + obj->prev = NULL;
42243 +
42244 + curr = &subj->obj_hash[index];
42245 + if (*curr != NULL)
42246 + (*curr)->prev = obj;
42247 +
42248 + obj->next = *curr;
42249 + *curr = obj;
42250 +
42251 + return;
42252 +}
42253 +
42254 +static void
42255 +insert_acl_subj_label(struct acl_subject_label *obj,
42256 + struct acl_role_label *role)
42257 +{
42258 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42259 + struct acl_subject_label **curr;
42260 +
42261 + obj->prev = NULL;
42262 +
42263 + curr = &role->subj_hash[index];
42264 + if (*curr != NULL)
42265 + (*curr)->prev = obj;
42266 +
42267 + obj->next = *curr;
42268 + *curr = obj;
42269 +
42270 + return;
42271 +}
42272 +
42273 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42274 +
42275 +static void *
42276 +create_table(__u32 * len, int elementsize)
42277 +{
42278 + unsigned int table_sizes[] = {
42279 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42280 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42281 + 4194301, 8388593, 16777213, 33554393, 67108859
42282 + };
42283 + void *newtable = NULL;
42284 + unsigned int pwr = 0;
42285 +
42286 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42287 + table_sizes[pwr] <= *len)
42288 + pwr++;
42289 +
42290 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42291 + return newtable;
42292 +
42293 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42294 + newtable =
42295 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42296 + else
42297 + newtable = vmalloc(table_sizes[pwr] * elementsize);
42298 +
42299 + *len = table_sizes[pwr];
42300 +
42301 + return newtable;
42302 +}
42303 +
42304 +static int
42305 +init_variables(const struct gr_arg *arg)
42306 +{
42307 + struct task_struct *reaper = &init_task;
42308 + unsigned int stacksize;
42309 +
42310 + subj_map_set.s_size = arg->role_db.num_subjects;
42311 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42312 + name_set.n_size = arg->role_db.num_objects;
42313 + inodev_set.i_size = arg->role_db.num_objects;
42314 +
42315 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
42316 + !name_set.n_size || !inodev_set.i_size)
42317 + return 1;
42318 +
42319 + if (!gr_init_uidset())
42320 + return 1;
42321 +
42322 + /* set up the stack that holds allocation info */
42323 +
42324 + stacksize = arg->role_db.num_pointers + 5;
42325 +
42326 + if (!acl_alloc_stack_init(stacksize))
42327 + return 1;
42328 +
42329 + /* grab reference for the real root dentry and vfsmount */
42330 + get_fs_root(reaper->fs, &real_root);
42331 +
42332 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42333 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42334 +#endif
42335 +
42336 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42337 + if (fakefs_obj_rw == NULL)
42338 + return 1;
42339 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42340 +
42341 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42342 + if (fakefs_obj_rwx == NULL)
42343 + return 1;
42344 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42345 +
42346 + subj_map_set.s_hash =
42347 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42348 + acl_role_set.r_hash =
42349 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42350 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42351 + inodev_set.i_hash =
42352 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42353 +
42354 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42355 + !name_set.n_hash || !inodev_set.i_hash)
42356 + return 1;
42357 +
42358 + memset(subj_map_set.s_hash, 0,
42359 + sizeof(struct subject_map *) * subj_map_set.s_size);
42360 + memset(acl_role_set.r_hash, 0,
42361 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
42362 + memset(name_set.n_hash, 0,
42363 + sizeof (struct name_entry *) * name_set.n_size);
42364 + memset(inodev_set.i_hash, 0,
42365 + sizeof (struct inodev_entry *) * inodev_set.i_size);
42366 +
42367 + return 0;
42368 +}
42369 +
42370 +/* free information not needed after startup
42371 + currently contains user->kernel pointer mappings for subjects
42372 +*/
42373 +
42374 +static void
42375 +free_init_variables(void)
42376 +{
42377 + __u32 i;
42378 +
42379 + if (subj_map_set.s_hash) {
42380 + for (i = 0; i < subj_map_set.s_size; i++) {
42381 + if (subj_map_set.s_hash[i]) {
42382 + kfree(subj_map_set.s_hash[i]);
42383 + subj_map_set.s_hash[i] = NULL;
42384 + }
42385 + }
42386 +
42387 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42388 + PAGE_SIZE)
42389 + kfree(subj_map_set.s_hash);
42390 + else
42391 + vfree(subj_map_set.s_hash);
42392 + }
42393 +
42394 + return;
42395 +}
42396 +
42397 +static void
42398 +free_variables(void)
42399 +{
42400 + struct acl_subject_label *s;
42401 + struct acl_role_label *r;
42402 + struct task_struct *task, *task2;
42403 + unsigned int x;
42404 +
42405 + gr_clear_learn_entries();
42406 +
42407 + read_lock(&tasklist_lock);
42408 + do_each_thread(task2, task) {
42409 + task->acl_sp_role = 0;
42410 + task->acl_role_id = 0;
42411 + task->acl = NULL;
42412 + task->role = NULL;
42413 + } while_each_thread(task2, task);
42414 + read_unlock(&tasklist_lock);
42415 +
42416 + /* release the reference to the real root dentry and vfsmount */
42417 + path_put(&real_root);
42418 +
42419 + /* free all object hash tables */
42420 +
42421 + FOR_EACH_ROLE_START(r)
42422 + if (r->subj_hash == NULL)
42423 + goto next_role;
42424 + FOR_EACH_SUBJECT_START(r, s, x)
42425 + if (s->obj_hash == NULL)
42426 + break;
42427 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42428 + kfree(s->obj_hash);
42429 + else
42430 + vfree(s->obj_hash);
42431 + FOR_EACH_SUBJECT_END(s, x)
42432 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42433 + if (s->obj_hash == NULL)
42434 + break;
42435 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42436 + kfree(s->obj_hash);
42437 + else
42438 + vfree(s->obj_hash);
42439 + FOR_EACH_NESTED_SUBJECT_END(s)
42440 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42441 + kfree(r->subj_hash);
42442 + else
42443 + vfree(r->subj_hash);
42444 + r->subj_hash = NULL;
42445 +next_role:
42446 + FOR_EACH_ROLE_END(r)
42447 +
42448 + acl_free_all();
42449 +
42450 + if (acl_role_set.r_hash) {
42451 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42452 + PAGE_SIZE)
42453 + kfree(acl_role_set.r_hash);
42454 + else
42455 + vfree(acl_role_set.r_hash);
42456 + }
42457 + if (name_set.n_hash) {
42458 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42459 + PAGE_SIZE)
42460 + kfree(name_set.n_hash);
42461 + else
42462 + vfree(name_set.n_hash);
42463 + }
42464 +
42465 + if (inodev_set.i_hash) {
42466 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42467 + PAGE_SIZE)
42468 + kfree(inodev_set.i_hash);
42469 + else
42470 + vfree(inodev_set.i_hash);
42471 + }
42472 +
42473 + gr_free_uidset();
42474 +
42475 + memset(&name_set, 0, sizeof (struct name_db));
42476 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42477 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42478 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42479 +
42480 + default_role = NULL;
42481 + role_list = NULL;
42482 +
42483 + return;
42484 +}
42485 +
42486 +static __u32
42487 +count_user_objs(struct acl_object_label *userp)
42488 +{
42489 + struct acl_object_label o_tmp;
42490 + __u32 num = 0;
42491 +
42492 + while (userp) {
42493 + if (copy_from_user(&o_tmp, userp,
42494 + sizeof (struct acl_object_label)))
42495 + break;
42496 +
42497 + userp = o_tmp.prev;
42498 + num++;
42499 + }
42500 +
42501 + return num;
42502 +}
42503 +
42504 +static struct acl_subject_label *
42505 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42506 +
42507 +static int
42508 +copy_user_glob(struct acl_object_label *obj)
42509 +{
42510 + struct acl_object_label *g_tmp, **guser;
42511 + unsigned int len;
42512 + char *tmp;
42513 +
42514 + if (obj->globbed == NULL)
42515 + return 0;
42516 +
42517 + guser = &obj->globbed;
42518 + while (*guser) {
42519 + g_tmp = (struct acl_object_label *)
42520 + acl_alloc(sizeof (struct acl_object_label));
42521 + if (g_tmp == NULL)
42522 + return -ENOMEM;
42523 +
42524 + if (copy_from_user(g_tmp, *guser,
42525 + sizeof (struct acl_object_label)))
42526 + return -EFAULT;
42527 +
42528 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42529 +
42530 + if (!len || len >= PATH_MAX)
42531 + return -EINVAL;
42532 +
42533 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42534 + return -ENOMEM;
42535 +
42536 + if (copy_from_user(tmp, g_tmp->filename, len))
42537 + return -EFAULT;
42538 + tmp[len-1] = '\0';
42539 + g_tmp->filename = tmp;
42540 +
42541 + *guser = g_tmp;
42542 + guser = &(g_tmp->next);
42543 + }
42544 +
42545 + return 0;
42546 +}
42547 +
42548 +static int
42549 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42550 + struct acl_role_label *role)
42551 +{
42552 + struct acl_object_label *o_tmp;
42553 + unsigned int len;
42554 + int ret;
42555 + char *tmp;
42556 +
42557 + while (userp) {
42558 + if ((o_tmp = (struct acl_object_label *)
42559 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42560 + return -ENOMEM;
42561 +
42562 + if (copy_from_user(o_tmp, userp,
42563 + sizeof (struct acl_object_label)))
42564 + return -EFAULT;
42565 +
42566 + userp = o_tmp->prev;
42567 +
42568 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42569 +
42570 + if (!len || len >= PATH_MAX)
42571 + return -EINVAL;
42572 +
42573 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42574 + return -ENOMEM;
42575 +
42576 + if (copy_from_user(tmp, o_tmp->filename, len))
42577 + return -EFAULT;
42578 + tmp[len-1] = '\0';
42579 + o_tmp->filename = tmp;
42580 +
42581 + insert_acl_obj_label(o_tmp, subj);
42582 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42583 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42584 + return -ENOMEM;
42585 +
42586 + ret = copy_user_glob(o_tmp);
42587 + if (ret)
42588 + return ret;
42589 +
42590 + if (o_tmp->nested) {
42591 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42592 + if (IS_ERR(o_tmp->nested))
42593 + return PTR_ERR(o_tmp->nested);
42594 +
42595 + /* insert into nested subject list */
42596 + o_tmp->nested->next = role->hash->first;
42597 + role->hash->first = o_tmp->nested;
42598 + }
42599 + }
42600 +
42601 + return 0;
42602 +}
42603 +
42604 +static __u32
42605 +count_user_subjs(struct acl_subject_label *userp)
42606 +{
42607 + struct acl_subject_label s_tmp;
42608 + __u32 num = 0;
42609 +
42610 + while (userp) {
42611 + if (copy_from_user(&s_tmp, userp,
42612 + sizeof (struct acl_subject_label)))
42613 + break;
42614 +
42615 + userp = s_tmp.prev;
42616 + /* do not count nested subjects against this count, since
42617 + they are not included in the hash table, but are
42618 + attached to objects. We have already counted
42619 + the subjects in userspace for the allocation
42620 + stack
42621 + */
42622 + if (!(s_tmp.mode & GR_NESTED))
42623 + num++;
42624 + }
42625 +
42626 + return num;
42627 +}
42628 +
42629 +static int
42630 +copy_user_allowedips(struct acl_role_label *rolep)
42631 +{
42632 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42633 +
42634 + ruserip = rolep->allowed_ips;
42635 +
42636 + while (ruserip) {
42637 + rlast = rtmp;
42638 +
42639 + if ((rtmp = (struct role_allowed_ip *)
42640 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42641 + return -ENOMEM;
42642 +
42643 + if (copy_from_user(rtmp, ruserip,
42644 + sizeof (struct role_allowed_ip)))
42645 + return -EFAULT;
42646 +
42647 + ruserip = rtmp->prev;
42648 +
42649 + if (!rlast) {
42650 + rtmp->prev = NULL;
42651 + rolep->allowed_ips = rtmp;
42652 + } else {
42653 + rlast->next = rtmp;
42654 + rtmp->prev = rlast;
42655 + }
42656 +
42657 + if (!ruserip)
42658 + rtmp->next = NULL;
42659 + }
42660 +
42661 + return 0;
42662 +}
42663 +
42664 +static int
42665 +copy_user_transitions(struct acl_role_label *rolep)
42666 +{
42667 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42668 +
42669 + unsigned int len;
42670 + char *tmp;
42671 +
42672 + rusertp = rolep->transitions;
42673 +
42674 + while (rusertp) {
42675 + rlast = rtmp;
42676 +
42677 + if ((rtmp = (struct role_transition *)
42678 + acl_alloc(sizeof (struct role_transition))) == NULL)
42679 + return -ENOMEM;
42680 +
42681 + if (copy_from_user(rtmp, rusertp,
42682 + sizeof (struct role_transition)))
42683 + return -EFAULT;
42684 +
42685 + rusertp = rtmp->prev;
42686 +
42687 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42688 +
42689 + if (!len || len >= GR_SPROLE_LEN)
42690 + return -EINVAL;
42691 +
42692 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42693 + return -ENOMEM;
42694 +
42695 + if (copy_from_user(tmp, rtmp->rolename, len))
42696 + return -EFAULT;
42697 + tmp[len-1] = '\0';
42698 + rtmp->rolename = tmp;
42699 +
42700 + if (!rlast) {
42701 + rtmp->prev = NULL;
42702 + rolep->transitions = rtmp;
42703 + } else {
42704 + rlast->next = rtmp;
42705 + rtmp->prev = rlast;
42706 + }
42707 +
42708 + if (!rusertp)
42709 + rtmp->next = NULL;
42710 + }
42711 +
42712 + return 0;
42713 +}
42714 +
42715 +static struct acl_subject_label *
42716 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42717 +{
42718 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42719 + unsigned int len;
42720 + char *tmp;
42721 + __u32 num_objs;
42722 + struct acl_ip_label **i_tmp, *i_utmp2;
42723 + struct gr_hash_struct ghash;
42724 + struct subject_map *subjmap;
42725 + unsigned int i_num;
42726 + int err;
42727 +
42728 + s_tmp = lookup_subject_map(userp);
42729 +
42730 + /* we've already copied this subject into the kernel, just return
42731 + the reference to it, and don't copy it over again
42732 + */
42733 + if (s_tmp)
42734 + return(s_tmp);
42735 +
42736 + if ((s_tmp = (struct acl_subject_label *)
42737 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42738 + return ERR_PTR(-ENOMEM);
42739 +
42740 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42741 + if (subjmap == NULL)
42742 + return ERR_PTR(-ENOMEM);
42743 +
42744 + subjmap->user = userp;
42745 + subjmap->kernel = s_tmp;
42746 + insert_subj_map_entry(subjmap);
42747 +
42748 + if (copy_from_user(s_tmp, userp,
42749 + sizeof (struct acl_subject_label)))
42750 + return ERR_PTR(-EFAULT);
42751 +
42752 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42753 +
42754 + if (!len || len >= PATH_MAX)
42755 + return ERR_PTR(-EINVAL);
42756 +
42757 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42758 + return ERR_PTR(-ENOMEM);
42759 +
42760 + if (copy_from_user(tmp, s_tmp->filename, len))
42761 + return ERR_PTR(-EFAULT);
42762 + tmp[len-1] = '\0';
42763 + s_tmp->filename = tmp;
42764 +
42765 + if (!strcmp(s_tmp->filename, "/"))
42766 + role->root_label = s_tmp;
42767 +
42768 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42769 + return ERR_PTR(-EFAULT);
42770 +
42771 + /* copy user and group transition tables */
42772 +
42773 + if (s_tmp->user_trans_num) {
42774 + uid_t *uidlist;
42775 +
42776 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42777 + if (uidlist == NULL)
42778 + return ERR_PTR(-ENOMEM);
42779 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42780 + return ERR_PTR(-EFAULT);
42781 +
42782 + s_tmp->user_transitions = uidlist;
42783 + }
42784 +
42785 + if (s_tmp->group_trans_num) {
42786 + gid_t *gidlist;
42787 +
42788 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42789 + if (gidlist == NULL)
42790 + return ERR_PTR(-ENOMEM);
42791 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42792 + return ERR_PTR(-EFAULT);
42793 +
42794 + s_tmp->group_transitions = gidlist;
42795 + }
42796 +
42797 + /* set up object hash table */
42798 + num_objs = count_user_objs(ghash.first);
42799 +
42800 + s_tmp->obj_hash_size = num_objs;
42801 + s_tmp->obj_hash =
42802 + (struct acl_object_label **)
42803 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42804 +
42805 + if (!s_tmp->obj_hash)
42806 + return ERR_PTR(-ENOMEM);
42807 +
42808 + memset(s_tmp->obj_hash, 0,
42809 + s_tmp->obj_hash_size *
42810 + sizeof (struct acl_object_label *));
42811 +
42812 + /* add in objects */
42813 + err = copy_user_objs(ghash.first, s_tmp, role);
42814 +
42815 + if (err)
42816 + return ERR_PTR(err);
42817 +
42818 + /* set pointer for parent subject */
42819 + if (s_tmp->parent_subject) {
42820 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42821 +
42822 + if (IS_ERR(s_tmp2))
42823 + return s_tmp2;
42824 +
42825 + s_tmp->parent_subject = s_tmp2;
42826 + }
42827 +
42828 + /* add in ip acls */
42829 +
42830 + if (!s_tmp->ip_num) {
42831 + s_tmp->ips = NULL;
42832 + goto insert;
42833 + }
42834 +
42835 + i_tmp =
42836 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42837 + sizeof (struct acl_ip_label *));
42838 +
42839 + if (!i_tmp)
42840 + return ERR_PTR(-ENOMEM);
42841 +
42842 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42843 + *(i_tmp + i_num) =
42844 + (struct acl_ip_label *)
42845 + acl_alloc(sizeof (struct acl_ip_label));
42846 + if (!*(i_tmp + i_num))
42847 + return ERR_PTR(-ENOMEM);
42848 +
42849 + if (copy_from_user
42850 + (&i_utmp2, s_tmp->ips + i_num,
42851 + sizeof (struct acl_ip_label *)))
42852 + return ERR_PTR(-EFAULT);
42853 +
42854 + if (copy_from_user
42855 + (*(i_tmp + i_num), i_utmp2,
42856 + sizeof (struct acl_ip_label)))
42857 + return ERR_PTR(-EFAULT);
42858 +
42859 + if ((*(i_tmp + i_num))->iface == NULL)
42860 + continue;
42861 +
42862 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42863 + if (!len || len >= IFNAMSIZ)
42864 + return ERR_PTR(-EINVAL);
42865 + tmp = acl_alloc(len);
42866 + if (tmp == NULL)
42867 + return ERR_PTR(-ENOMEM);
42868 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42869 + return ERR_PTR(-EFAULT);
42870 + (*(i_tmp + i_num))->iface = tmp;
42871 + }
42872 +
42873 + s_tmp->ips = i_tmp;
42874 +
42875 +insert:
42876 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42877 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42878 + return ERR_PTR(-ENOMEM);
42879 +
42880 + return s_tmp;
42881 +}
42882 +
42883 +static int
42884 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42885 +{
42886 + struct acl_subject_label s_pre;
42887 + struct acl_subject_label * ret;
42888 + int err;
42889 +
42890 + while (userp) {
42891 + if (copy_from_user(&s_pre, userp,
42892 + sizeof (struct acl_subject_label)))
42893 + return -EFAULT;
42894 +
42895 + /* do not add nested subjects here, add
42896 + while parsing objects
42897 + */
42898 +
42899 + if (s_pre.mode & GR_NESTED) {
42900 + userp = s_pre.prev;
42901 + continue;
42902 + }
42903 +
42904 + ret = do_copy_user_subj(userp, role);
42905 +
42906 + err = PTR_ERR(ret);
42907 + if (IS_ERR(ret))
42908 + return err;
42909 +
42910 + insert_acl_subj_label(ret, role);
42911 +
42912 + userp = s_pre.prev;
42913 + }
42914 +
42915 + return 0;
42916 +}
42917 +
42918 +static int
42919 +copy_user_acl(struct gr_arg *arg)
42920 +{
42921 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42922 + struct sprole_pw *sptmp;
42923 + struct gr_hash_struct *ghash;
42924 + uid_t *domainlist;
42925 + unsigned int r_num;
42926 + unsigned int len;
42927 + char *tmp;
42928 + int err = 0;
42929 + __u16 i;
42930 + __u32 num_subjs;
42931 +
42932 + /* we need a default and kernel role */
42933 + if (arg->role_db.num_roles < 2)
42934 + return -EINVAL;
42935 +
42936 + /* copy special role authentication info from userspace */
42937 +
42938 + num_sprole_pws = arg->num_sprole_pws;
42939 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42940 +
42941 + if (!acl_special_roles) {
42942 + err = -ENOMEM;
42943 + goto cleanup;
42944 + }
42945 +
42946 + for (i = 0; i < num_sprole_pws; i++) {
42947 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42948 + if (!sptmp) {
42949 + err = -ENOMEM;
42950 + goto cleanup;
42951 + }
42952 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42953 + sizeof (struct sprole_pw))) {
42954 + err = -EFAULT;
42955 + goto cleanup;
42956 + }
42957 +
42958 + len =
42959 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42960 +
42961 + if (!len || len >= GR_SPROLE_LEN) {
42962 + err = -EINVAL;
42963 + goto cleanup;
42964 + }
42965 +
42966 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42967 + err = -ENOMEM;
42968 + goto cleanup;
42969 + }
42970 +
42971 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42972 + err = -EFAULT;
42973 + goto cleanup;
42974 + }
42975 + tmp[len-1] = '\0';
42976 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42977 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42978 +#endif
42979 + sptmp->rolename = tmp;
42980 + acl_special_roles[i] = sptmp;
42981 + }
42982 +
42983 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42984 +
42985 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42986 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42987 +
42988 + if (!r_tmp) {
42989 + err = -ENOMEM;
42990 + goto cleanup;
42991 + }
42992 +
42993 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42994 + sizeof (struct acl_role_label *))) {
42995 + err = -EFAULT;
42996 + goto cleanup;
42997 + }
42998 +
42999 + if (copy_from_user(r_tmp, r_utmp2,
43000 + sizeof (struct acl_role_label))) {
43001 + err = -EFAULT;
43002 + goto cleanup;
43003 + }
43004 +
43005 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
43006 +
43007 + if (!len || len >= PATH_MAX) {
43008 + err = -EINVAL;
43009 + goto cleanup;
43010 + }
43011 +
43012 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
43013 + err = -ENOMEM;
43014 + goto cleanup;
43015 + }
43016 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
43017 + err = -EFAULT;
43018 + goto cleanup;
43019 + }
43020 + tmp[len-1] = '\0';
43021 + r_tmp->rolename = tmp;
43022 +
43023 + if (!strcmp(r_tmp->rolename, "default")
43024 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
43025 + default_role = r_tmp;
43026 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
43027 + kernel_role = r_tmp;
43028 + }
43029 +
43030 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
43031 + err = -ENOMEM;
43032 + goto cleanup;
43033 + }
43034 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
43035 + err = -EFAULT;
43036 + goto cleanup;
43037 + }
43038 +
43039 + r_tmp->hash = ghash;
43040 +
43041 + num_subjs = count_user_subjs(r_tmp->hash->first);
43042 +
43043 + r_tmp->subj_hash_size = num_subjs;
43044 + r_tmp->subj_hash =
43045 + (struct acl_subject_label **)
43046 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
43047 +
43048 + if (!r_tmp->subj_hash) {
43049 + err = -ENOMEM;
43050 + goto cleanup;
43051 + }
43052 +
43053 + err = copy_user_allowedips(r_tmp);
43054 + if (err)
43055 + goto cleanup;
43056 +
43057 + /* copy domain info */
43058 + if (r_tmp->domain_children != NULL) {
43059 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
43060 + if (domainlist == NULL) {
43061 + err = -ENOMEM;
43062 + goto cleanup;
43063 + }
43064 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
43065 + err = -EFAULT;
43066 + goto cleanup;
43067 + }
43068 + r_tmp->domain_children = domainlist;
43069 + }
43070 +
43071 + err = copy_user_transitions(r_tmp);
43072 + if (err)
43073 + goto cleanup;
43074 +
43075 + memset(r_tmp->subj_hash, 0,
43076 + r_tmp->subj_hash_size *
43077 + sizeof (struct acl_subject_label *));
43078 +
43079 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
43080 +
43081 + if (err)
43082 + goto cleanup;
43083 +
43084 + /* set nested subject list to null */
43085 + r_tmp->hash->first = NULL;
43086 +
43087 + insert_acl_role_label(r_tmp);
43088 + }
43089 +
43090 + goto return_err;
43091 + cleanup:
43092 + free_variables();
43093 + return_err:
43094 + return err;
43095 +
43096 +}
43097 +
43098 +static int
43099 +gracl_init(struct gr_arg *args)
43100 +{
43101 + int error = 0;
43102 +
43103 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
43104 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
43105 +
43106 + if (init_variables(args)) {
43107 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
43108 + error = -ENOMEM;
43109 + free_variables();
43110 + goto out;
43111 + }
43112 +
43113 + error = copy_user_acl(args);
43114 + free_init_variables();
43115 + if (error) {
43116 + free_variables();
43117 + goto out;
43118 + }
43119 +
43120 + if ((error = gr_set_acls(0))) {
43121 + free_variables();
43122 + goto out;
43123 + }
43124 +
43125 + pax_open_kernel();
43126 + gr_status |= GR_READY;
43127 + pax_close_kernel();
43128 +
43129 + out:
43130 + return error;
43131 +}
43132 +
43133 +/* derived from glibc fnmatch() 0: match, 1: no match*/
43134 +
43135 +static int
43136 +glob_match(const char *p, const char *n)
43137 +{
43138 + char c;
43139 +
43140 + while ((c = *p++) != '\0') {
43141 + switch (c) {
43142 + case '?':
43143 + if (*n == '\0')
43144 + return 1;
43145 + else if (*n == '/')
43146 + return 1;
43147 + break;
43148 + case '\\':
43149 + if (*n != c)
43150 + return 1;
43151 + break;
43152 + case '*':
43153 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
43154 + if (*n == '/')
43155 + return 1;
43156 + else if (c == '?') {
43157 + if (*n == '\0')
43158 + return 1;
43159 + else
43160 + ++n;
43161 + }
43162 + }
43163 + if (c == '\0') {
43164 + return 0;
43165 + } else {
43166 + const char *endp;
43167 +
43168 + if ((endp = strchr(n, '/')) == NULL)
43169 + endp = n + strlen(n);
43170 +
43171 + if (c == '[') {
43172 + for (--p; n < endp; ++n)
43173 + if (!glob_match(p, n))
43174 + return 0;
43175 + } else if (c == '/') {
43176 + while (*n != '\0' && *n != '/')
43177 + ++n;
43178 + if (*n == '/' && !glob_match(p, n + 1))
43179 + return 0;
43180 + } else {
43181 + for (--p; n < endp; ++n)
43182 + if (*n == c && !glob_match(p, n))
43183 + return 0;
43184 + }
43185 +
43186 + return 1;
43187 + }
43188 + case '[':
43189 + {
43190 + int not;
43191 + char cold;
43192 +
43193 + if (*n == '\0' || *n == '/')
43194 + return 1;
43195 +
43196 + not = (*p == '!' || *p == '^');
43197 + if (not)
43198 + ++p;
43199 +
43200 + c = *p++;
43201 + for (;;) {
43202 + unsigned char fn = (unsigned char)*n;
43203 +
43204 + if (c == '\0')
43205 + return 1;
43206 + else {
43207 + if (c == fn)
43208 + goto matched;
43209 + cold = c;
43210 + c = *p++;
43211 +
43212 + if (c == '-' && *p != ']') {
43213 + unsigned char cend = *p++;
43214 +
43215 + if (cend == '\0')
43216 + return 1;
43217 +
43218 + if (cold <= fn && fn <= cend)
43219 + goto matched;
43220 +
43221 + c = *p++;
43222 + }
43223 + }
43224 +
43225 + if (c == ']')
43226 + break;
43227 + }
43228 + if (!not)
43229 + return 1;
43230 + break;
43231 + matched:
43232 + while (c != ']') {
43233 + if (c == '\0')
43234 + return 1;
43235 +
43236 + c = *p++;
43237 + }
43238 + if (not)
43239 + return 1;
43240 + }
43241 + break;
43242 + default:
43243 + if (c != *n)
43244 + return 1;
43245 + }
43246 +
43247 + ++n;
43248 + }
43249 +
43250 + if (*n == '\0')
43251 + return 0;
43252 +
43253 + if (*n == '/')
43254 + return 0;
43255 +
43256 + return 1;
43257 +}
43258 +
43259 +static struct acl_object_label *
43260 +chk_glob_label(struct acl_object_label *globbed,
43261 + struct dentry *dentry, struct vfsmount *mnt, char **path)
43262 +{
43263 + struct acl_object_label *tmp;
43264 +
43265 + if (*path == NULL)
43266 + *path = gr_to_filename_nolock(dentry, mnt);
43267 +
43268 + tmp = globbed;
43269 +
43270 + while (tmp) {
43271 + if (!glob_match(tmp->filename, *path))
43272 + return tmp;
43273 + tmp = tmp->next;
43274 + }
43275 +
43276 + return NULL;
43277 +}
43278 +
43279 +static struct acl_object_label *
43280 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43281 + const ino_t curr_ino, const dev_t curr_dev,
43282 + const struct acl_subject_label *subj, char **path, const int checkglob)
43283 +{
43284 + struct acl_subject_label *tmpsubj;
43285 + struct acl_object_label *retval;
43286 + struct acl_object_label *retval2;
43287 +
43288 + tmpsubj = (struct acl_subject_label *) subj;
43289 + read_lock(&gr_inode_lock);
43290 + do {
43291 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43292 + if (retval) {
43293 + if (checkglob && retval->globbed) {
43294 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43295 + (struct vfsmount *)orig_mnt, path);
43296 + if (retval2)
43297 + retval = retval2;
43298 + }
43299 + break;
43300 + }
43301 + } while ((tmpsubj = tmpsubj->parent_subject));
43302 + read_unlock(&gr_inode_lock);
43303 +
43304 + return retval;
43305 +}
43306 +
43307 +static __inline__ struct acl_object_label *
43308 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43309 + struct dentry *curr_dentry,
43310 + const struct acl_subject_label *subj, char **path, const int checkglob)
43311 +{
43312 + int newglob = checkglob;
43313 + ino_t inode;
43314 + dev_t device;
43315 +
43316 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43317 + as we don't want a / * rule to match instead of the / object
43318 + don't do this for create lookups that call this function though, since they're looking up
43319 + on the parent and thus need globbing checks on all paths
43320 + */
43321 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43322 + newglob = GR_NO_GLOB;
43323 +
43324 + spin_lock(&curr_dentry->d_lock);
43325 + inode = curr_dentry->d_inode->i_ino;
43326 + device = __get_dev(curr_dentry);
43327 + spin_unlock(&curr_dentry->d_lock);
43328 +
43329 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43330 +}
43331 +
43332 +static struct acl_object_label *
43333 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43334 + const struct acl_subject_label *subj, char *path, const int checkglob)
43335 +{
43336 + struct dentry *dentry = (struct dentry *) l_dentry;
43337 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43338 + struct acl_object_label *retval;
43339 + struct dentry *parent;
43340 +
43341 + write_seqlock(&rename_lock);
43342 + br_read_lock(vfsmount_lock);
43343 +
43344 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43345 +#ifdef CONFIG_NET
43346 + mnt == sock_mnt ||
43347 +#endif
43348 +#ifdef CONFIG_HUGETLBFS
43349 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43350 +#endif
43351 + /* ignore Eric Biederman */
43352 + IS_PRIVATE(l_dentry->d_inode))) {
43353 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43354 + goto out;
43355 + }
43356 +
43357 + for (;;) {
43358 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43359 + break;
43360 +
43361 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43362 + if (mnt->mnt_parent == mnt)
43363 + break;
43364 +
43365 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43366 + if (retval != NULL)
43367 + goto out;
43368 +
43369 + dentry = mnt->mnt_mountpoint;
43370 + mnt = mnt->mnt_parent;
43371 + continue;
43372 + }
43373 +
43374 + parent = dentry->d_parent;
43375 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43376 + if (retval != NULL)
43377 + goto out;
43378 +
43379 + dentry = parent;
43380 + }
43381 +
43382 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43383 +
43384 + /* real_root is pinned so we don't have to hold a reference */
43385 + if (retval == NULL)
43386 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43387 +out:
43388 + br_read_unlock(vfsmount_lock);
43389 + write_sequnlock(&rename_lock);
43390 +
43391 + BUG_ON(retval == NULL);
43392 +
43393 + return retval;
43394 +}
43395 +
43396 +static __inline__ struct acl_object_label *
43397 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43398 + const struct acl_subject_label *subj)
43399 +{
43400 + char *path = NULL;
43401 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43402 +}
43403 +
43404 +static __inline__ struct acl_object_label *
43405 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43406 + const struct acl_subject_label *subj)
43407 +{
43408 + char *path = NULL;
43409 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43410 +}
43411 +
43412 +static __inline__ struct acl_object_label *
43413 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43414 + const struct acl_subject_label *subj, char *path)
43415 +{
43416 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43417 +}
43418 +
43419 +static struct acl_subject_label *
43420 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43421 + const struct acl_role_label *role)
43422 +{
43423 + struct dentry *dentry = (struct dentry *) l_dentry;
43424 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43425 + struct acl_subject_label *retval;
43426 + struct dentry *parent;
43427 +
43428 + write_seqlock(&rename_lock);
43429 + br_read_lock(vfsmount_lock);
43430 +
43431 + for (;;) {
43432 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43433 + break;
43434 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43435 + if (mnt->mnt_parent == mnt)
43436 + break;
43437 +
43438 + spin_lock(&dentry->d_lock);
43439 + read_lock(&gr_inode_lock);
43440 + retval =
43441 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43442 + __get_dev(dentry), role);
43443 + read_unlock(&gr_inode_lock);
43444 + spin_unlock(&dentry->d_lock);
43445 + if (retval != NULL)
43446 + goto out;
43447 +
43448 + dentry = mnt->mnt_mountpoint;
43449 + mnt = mnt->mnt_parent;
43450 + continue;
43451 + }
43452 +
43453 + spin_lock(&dentry->d_lock);
43454 + read_lock(&gr_inode_lock);
43455 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43456 + __get_dev(dentry), role);
43457 + read_unlock(&gr_inode_lock);
43458 + parent = dentry->d_parent;
43459 + spin_unlock(&dentry->d_lock);
43460 +
43461 + if (retval != NULL)
43462 + goto out;
43463 +
43464 + dentry = parent;
43465 + }
43466 +
43467 + spin_lock(&dentry->d_lock);
43468 + read_lock(&gr_inode_lock);
43469 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43470 + __get_dev(dentry), role);
43471 + read_unlock(&gr_inode_lock);
43472 + spin_unlock(&dentry->d_lock);
43473 +
43474 + if (unlikely(retval == NULL)) {
43475 + /* real_root is pinned, we don't need to hold a reference */
43476 + read_lock(&gr_inode_lock);
43477 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43478 + __get_dev(real_root.dentry), role);
43479 + read_unlock(&gr_inode_lock);
43480 + }
43481 +out:
43482 + br_read_unlock(vfsmount_lock);
43483 + write_sequnlock(&rename_lock);
43484 +
43485 + BUG_ON(retval == NULL);
43486 +
43487 + return retval;
43488 +}
43489 +
43490 +static void
43491 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43492 +{
43493 + struct task_struct *task = current;
43494 + const struct cred *cred = current_cred();
43495 +
43496 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43497 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43498 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43499 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43500 +
43501 + return;
43502 +}
43503 +
43504 +static void
43505 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43506 +{
43507 + struct task_struct *task = current;
43508 + const struct cred *cred = current_cred();
43509 +
43510 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43511 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43512 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43513 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43514 +
43515 + return;
43516 +}
43517 +
43518 +static void
43519 +gr_log_learn_id_change(const char type, const unsigned int real,
43520 + const unsigned int effective, const unsigned int fs)
43521 +{
43522 + struct task_struct *task = current;
43523 + const struct cred *cred = current_cred();
43524 +
43525 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43526 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43527 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43528 + type, real, effective, fs, &task->signal->saved_ip);
43529 +
43530 + return;
43531 +}
43532 +
43533 +__u32
43534 +gr_check_link(const struct dentry * new_dentry,
43535 + const struct dentry * parent_dentry,
43536 + const struct vfsmount * parent_mnt,
43537 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43538 +{
43539 + struct acl_object_label *obj;
43540 + __u32 oldmode, newmode;
43541 + __u32 needmode;
43542 +
43543 + if (unlikely(!(gr_status & GR_READY)))
43544 + return (GR_CREATE | GR_LINK);
43545 +
43546 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43547 + oldmode = obj->mode;
43548 +
43549 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43550 + oldmode |= (GR_CREATE | GR_LINK);
43551 +
43552 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43553 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43554 + needmode |= GR_SETID | GR_AUDIT_SETID;
43555 +
43556 + newmode =
43557 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43558 + oldmode | needmode);
43559 +
43560 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43561 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43562 + GR_INHERIT | GR_AUDIT_INHERIT);
43563 +
43564 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43565 + goto bad;
43566 +
43567 + if ((oldmode & needmode) != needmode)
43568 + goto bad;
43569 +
43570 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43571 + if ((newmode & needmode) != needmode)
43572 + goto bad;
43573 +
43574 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43575 + return newmode;
43576 +bad:
43577 + needmode = oldmode;
43578 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43579 + needmode |= GR_SETID;
43580 +
43581 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43582 + gr_log_learn(old_dentry, old_mnt, needmode);
43583 + return (GR_CREATE | GR_LINK);
43584 + } else if (newmode & GR_SUPPRESS)
43585 + return GR_SUPPRESS;
43586 + else
43587 + return 0;
43588 +}
43589 +
43590 +__u32
43591 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43592 + const struct vfsmount * mnt)
43593 +{
43594 + __u32 retval = mode;
43595 + struct acl_subject_label *curracl;
43596 + struct acl_object_label *currobj;
43597 +
43598 + if (unlikely(!(gr_status & GR_READY)))
43599 + return (mode & ~GR_AUDITS);
43600 +
43601 + curracl = current->acl;
43602 +
43603 + currobj = chk_obj_label(dentry, mnt, curracl);
43604 + retval = currobj->mode & mode;
43605 +
43606 + /* if we're opening a specified transfer file for writing
43607 + (e.g. /dev/initctl), then transfer our role to init
43608 + */
43609 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43610 + current->role->roletype & GR_ROLE_PERSIST)) {
43611 + struct task_struct *task = init_pid_ns.child_reaper;
43612 +
43613 + if (task->role != current->role) {
43614 + task->acl_sp_role = 0;
43615 + task->acl_role_id = current->acl_role_id;
43616 + task->role = current->role;
43617 + rcu_read_lock();
43618 + read_lock(&grsec_exec_file_lock);
43619 + gr_apply_subject_to_task(task);
43620 + read_unlock(&grsec_exec_file_lock);
43621 + rcu_read_unlock();
43622 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43623 + }
43624 + }
43625 +
43626 + if (unlikely
43627 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43628 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43629 + __u32 new_mode = mode;
43630 +
43631 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43632 +
43633 + retval = new_mode;
43634 +
43635 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43636 + new_mode |= GR_INHERIT;
43637 +
43638 + if (!(mode & GR_NOLEARN))
43639 + gr_log_learn(dentry, mnt, new_mode);
43640 + }
43641 +
43642 + return retval;
43643 +}
43644 +
43645 +__u32
43646 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43647 + const struct vfsmount * mnt, const __u32 mode)
43648 +{
43649 + struct name_entry *match;
43650 + struct acl_object_label *matchpo;
43651 + struct acl_subject_label *curracl;
43652 + char *path;
43653 + __u32 retval;
43654 +
43655 + if (unlikely(!(gr_status & GR_READY)))
43656 + return (mode & ~GR_AUDITS);
43657 +
43658 + preempt_disable();
43659 + path = gr_to_filename_rbac(new_dentry, mnt);
43660 + match = lookup_name_entry_create(path);
43661 +
43662 + if (!match)
43663 + goto check_parent;
43664 +
43665 + curracl = current->acl;
43666 +
43667 + read_lock(&gr_inode_lock);
43668 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43669 + read_unlock(&gr_inode_lock);
43670 +
43671 + if (matchpo) {
43672 + if ((matchpo->mode & mode) !=
43673 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43674 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43675 + __u32 new_mode = mode;
43676 +
43677 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43678 +
43679 + gr_log_learn(new_dentry, mnt, new_mode);
43680 +
43681 + preempt_enable();
43682 + return new_mode;
43683 + }
43684 + preempt_enable();
43685 + return (matchpo->mode & mode);
43686 + }
43687 +
43688 + check_parent:
43689 + curracl = current->acl;
43690 +
43691 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43692 + retval = matchpo->mode & mode;
43693 +
43694 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43695 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43696 + __u32 new_mode = mode;
43697 +
43698 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43699 +
43700 + gr_log_learn(new_dentry, mnt, new_mode);
43701 + preempt_enable();
43702 + return new_mode;
43703 + }
43704 +
43705 + preempt_enable();
43706 + return retval;
43707 +}
43708 +
43709 +int
43710 +gr_check_hidden_task(const struct task_struct *task)
43711 +{
43712 + if (unlikely(!(gr_status & GR_READY)))
43713 + return 0;
43714 +
43715 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43716 + return 1;
43717 +
43718 + return 0;
43719 +}
43720 +
43721 +int
43722 +gr_check_protected_task(const struct task_struct *task)
43723 +{
43724 + if (unlikely(!(gr_status & GR_READY) || !task))
43725 + return 0;
43726 +
43727 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43728 + task->acl != current->acl)
43729 + return 1;
43730 +
43731 + return 0;
43732 +}
43733 +
43734 +int
43735 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43736 +{
43737 + struct task_struct *p;
43738 + int ret = 0;
43739 +
43740 + if (unlikely(!(gr_status & GR_READY) || !pid))
43741 + return ret;
43742 +
43743 + read_lock(&tasklist_lock);
43744 + do_each_pid_task(pid, type, p) {
43745 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43746 + p->acl != current->acl) {
43747 + ret = 1;
43748 + goto out;
43749 + }
43750 + } while_each_pid_task(pid, type, p);
43751 +out:
43752 + read_unlock(&tasklist_lock);
43753 +
43754 + return ret;
43755 +}
43756 +
43757 +void
43758 +gr_copy_label(struct task_struct *tsk)
43759 +{
43760 + tsk->signal->used_accept = 0;
43761 + tsk->acl_sp_role = 0;
43762 + tsk->acl_role_id = current->acl_role_id;
43763 + tsk->acl = current->acl;
43764 + tsk->role = current->role;
43765 + tsk->signal->curr_ip = current->signal->curr_ip;
43766 + tsk->signal->saved_ip = current->signal->saved_ip;
43767 + if (current->exec_file)
43768 + get_file(current->exec_file);
43769 + tsk->exec_file = current->exec_file;
43770 + tsk->is_writable = current->is_writable;
43771 + if (unlikely(current->signal->used_accept)) {
43772 + current->signal->curr_ip = 0;
43773 + current->signal->saved_ip = 0;
43774 + }
43775 +
43776 + return;
43777 +}
43778 +
43779 +static void
43780 +gr_set_proc_res(struct task_struct *task)
43781 +{
43782 + struct acl_subject_label *proc;
43783 + unsigned short i;
43784 +
43785 + proc = task->acl;
43786 +
43787 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43788 + return;
43789 +
43790 + for (i = 0; i < RLIM_NLIMITS; i++) {
43791 + if (!(proc->resmask & (1 << i)))
43792 + continue;
43793 +
43794 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43795 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43796 + }
43797 +
43798 + return;
43799 +}
43800 +
43801 +extern int __gr_process_user_ban(struct user_struct *user);
43802 +
43803 +int
43804 +gr_check_user_change(int real, int effective, int fs)
43805 +{
43806 + unsigned int i;
43807 + __u16 num;
43808 + uid_t *uidlist;
43809 + int curuid;
43810 + int realok = 0;
43811 + int effectiveok = 0;
43812 + int fsok = 0;
43813 +
43814 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43815 + struct user_struct *user;
43816 +
43817 + if (real == -1)
43818 + goto skipit;
43819 +
43820 + user = find_user(real);
43821 + if (user == NULL)
43822 + goto skipit;
43823 +
43824 + if (__gr_process_user_ban(user)) {
43825 + /* for find_user */
43826 + free_uid(user);
43827 + return 1;
43828 + }
43829 +
43830 + /* for find_user */
43831 + free_uid(user);
43832 +
43833 +skipit:
43834 +#endif
43835 +
43836 + if (unlikely(!(gr_status & GR_READY)))
43837 + return 0;
43838 +
43839 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43840 + gr_log_learn_id_change('u', real, effective, fs);
43841 +
43842 + num = current->acl->user_trans_num;
43843 + uidlist = current->acl->user_transitions;
43844 +
43845 + if (uidlist == NULL)
43846 + return 0;
43847 +
43848 + if (real == -1)
43849 + realok = 1;
43850 + if (effective == -1)
43851 + effectiveok = 1;
43852 + if (fs == -1)
43853 + fsok = 1;
43854 +
43855 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43856 + for (i = 0; i < num; i++) {
43857 + curuid = (int)uidlist[i];
43858 + if (real == curuid)
43859 + realok = 1;
43860 + if (effective == curuid)
43861 + effectiveok = 1;
43862 + if (fs == curuid)
43863 + fsok = 1;
43864 + }
43865 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43866 + for (i = 0; i < num; i++) {
43867 + curuid = (int)uidlist[i];
43868 + if (real == curuid)
43869 + break;
43870 + if (effective == curuid)
43871 + break;
43872 + if (fs == curuid)
43873 + break;
43874 + }
43875 + /* not in deny list */
43876 + if (i == num) {
43877 + realok = 1;
43878 + effectiveok = 1;
43879 + fsok = 1;
43880 + }
43881 + }
43882 +
43883 + if (realok && effectiveok && fsok)
43884 + return 0;
43885 + else {
43886 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43887 + return 1;
43888 + }
43889 +}
43890 +
43891 +int
43892 +gr_check_group_change(int real, int effective, int fs)
43893 +{
43894 + unsigned int i;
43895 + __u16 num;
43896 + gid_t *gidlist;
43897 + int curgid;
43898 + int realok = 0;
43899 + int effectiveok = 0;
43900 + int fsok = 0;
43901 +
43902 + if (unlikely(!(gr_status & GR_READY)))
43903 + return 0;
43904 +
43905 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43906 + gr_log_learn_id_change('g', real, effective, fs);
43907 +
43908 + num = current->acl->group_trans_num;
43909 + gidlist = current->acl->group_transitions;
43910 +
43911 + if (gidlist == NULL)
43912 + return 0;
43913 +
43914 + if (real == -1)
43915 + realok = 1;
43916 + if (effective == -1)
43917 + effectiveok = 1;
43918 + if (fs == -1)
43919 + fsok = 1;
43920 +
43921 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43922 + for (i = 0; i < num; i++) {
43923 + curgid = (int)gidlist[i];
43924 + if (real == curgid)
43925 + realok = 1;
43926 + if (effective == curgid)
43927 + effectiveok = 1;
43928 + if (fs == curgid)
43929 + fsok = 1;
43930 + }
43931 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43932 + for (i = 0; i < num; i++) {
43933 + curgid = (int)gidlist[i];
43934 + if (real == curgid)
43935 + break;
43936 + if (effective == curgid)
43937 + break;
43938 + if (fs == curgid)
43939 + break;
43940 + }
43941 + /* not in deny list */
43942 + if (i == num) {
43943 + realok = 1;
43944 + effectiveok = 1;
43945 + fsok = 1;
43946 + }
43947 + }
43948 +
43949 + if (realok && effectiveok && fsok)
43950 + return 0;
43951 + else {
43952 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43953 + return 1;
43954 + }
43955 +}
43956 +
43957 +void
43958 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43959 +{
43960 + struct acl_role_label *role = task->role;
43961 + struct acl_subject_label *subj = NULL;
43962 + struct acl_object_label *obj;
43963 + struct file *filp;
43964 +
43965 + if (unlikely(!(gr_status & GR_READY)))
43966 + return;
43967 +
43968 + filp = task->exec_file;
43969 +
43970 + /* kernel process, we'll give them the kernel role */
43971 + if (unlikely(!filp)) {
43972 + task->role = kernel_role;
43973 + task->acl = kernel_role->root_label;
43974 + return;
43975 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43976 + role = lookup_acl_role_label(task, uid, gid);
43977 +
43978 + /* perform subject lookup in possibly new role
43979 + we can use this result below in the case where role == task->role
43980 + */
43981 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43982 +
43983 + /* if we changed uid/gid, but result in the same role
43984 + and are using inheritance, don't lose the inherited subject
43985 + if current subject is other than what normal lookup
43986 + would result in, we arrived via inheritance, don't
43987 + lose subject
43988 + */
43989 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43990 + (subj == task->acl)))
43991 + task->acl = subj;
43992 +
43993 + task->role = role;
43994 +
43995 + task->is_writable = 0;
43996 +
43997 + /* ignore additional mmap checks for processes that are writable
43998 + by the default ACL */
43999 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44000 + if (unlikely(obj->mode & GR_WRITE))
44001 + task->is_writable = 1;
44002 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44003 + if (unlikely(obj->mode & GR_WRITE))
44004 + task->is_writable = 1;
44005 +
44006 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44007 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44008 +#endif
44009 +
44010 + gr_set_proc_res(task);
44011 +
44012 + return;
44013 +}
44014 +
44015 +int
44016 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
44017 + const int unsafe_share)
44018 +{
44019 + struct task_struct *task = current;
44020 + struct acl_subject_label *newacl;
44021 + struct acl_object_label *obj;
44022 + __u32 retmode;
44023 +
44024 + if (unlikely(!(gr_status & GR_READY)))
44025 + return 0;
44026 +
44027 + newacl = chk_subj_label(dentry, mnt, task->role);
44028 +
44029 + task_lock(task);
44030 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
44031 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
44032 + !(task->role->roletype & GR_ROLE_GOD) &&
44033 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
44034 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
44035 + task_unlock(task);
44036 + if (unsafe_share)
44037 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
44038 + else
44039 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
44040 + return -EACCES;
44041 + }
44042 + task_unlock(task);
44043 +
44044 + obj = chk_obj_label(dentry, mnt, task->acl);
44045 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
44046 +
44047 + if (!(task->acl->mode & GR_INHERITLEARN) &&
44048 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
44049 + if (obj->nested)
44050 + task->acl = obj->nested;
44051 + else
44052 + task->acl = newacl;
44053 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
44054 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
44055 +
44056 + task->is_writable = 0;
44057 +
44058 + /* ignore additional mmap checks for processes that are writable
44059 + by the default ACL */
44060 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
44061 + if (unlikely(obj->mode & GR_WRITE))
44062 + task->is_writable = 1;
44063 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
44064 + if (unlikely(obj->mode & GR_WRITE))
44065 + task->is_writable = 1;
44066 +
44067 + gr_set_proc_res(task);
44068 +
44069 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44070 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44071 +#endif
44072 + return 0;
44073 +}
44074 +
44075 +/* always called with valid inodev ptr */
44076 +static void
44077 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
44078 +{
44079 + struct acl_object_label *matchpo;
44080 + struct acl_subject_label *matchps;
44081 + struct acl_subject_label *subj;
44082 + struct acl_role_label *role;
44083 + unsigned int x;
44084 +
44085 + FOR_EACH_ROLE_START(role)
44086 + FOR_EACH_SUBJECT_START(role, subj, x)
44087 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
44088 + matchpo->mode |= GR_DELETED;
44089 + FOR_EACH_SUBJECT_END(subj,x)
44090 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44091 + if (subj->inode == ino && subj->device == dev)
44092 + subj->mode |= GR_DELETED;
44093 + FOR_EACH_NESTED_SUBJECT_END(subj)
44094 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
44095 + matchps->mode |= GR_DELETED;
44096 + FOR_EACH_ROLE_END(role)
44097 +
44098 + inodev->nentry->deleted = 1;
44099 +
44100 + return;
44101 +}
44102 +
44103 +void
44104 +gr_handle_delete(const ino_t ino, const dev_t dev)
44105 +{
44106 + struct inodev_entry *inodev;
44107 +
44108 + if (unlikely(!(gr_status & GR_READY)))
44109 + return;
44110 +
44111 + write_lock(&gr_inode_lock);
44112 + inodev = lookup_inodev_entry(ino, dev);
44113 + if (inodev != NULL)
44114 + do_handle_delete(inodev, ino, dev);
44115 + write_unlock(&gr_inode_lock);
44116 +
44117 + return;
44118 +}
44119 +
44120 +static void
44121 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
44122 + const ino_t newinode, const dev_t newdevice,
44123 + struct acl_subject_label *subj)
44124 +{
44125 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44126 + struct acl_object_label *match;
44127 +
44128 + match = subj->obj_hash[index];
44129 +
44130 + while (match && (match->inode != oldinode ||
44131 + match->device != olddevice ||
44132 + !(match->mode & GR_DELETED)))
44133 + match = match->next;
44134 +
44135 + if (match && (match->inode == oldinode)
44136 + && (match->device == olddevice)
44137 + && (match->mode & GR_DELETED)) {
44138 + if (match->prev == NULL) {
44139 + subj->obj_hash[index] = match->next;
44140 + if (match->next != NULL)
44141 + match->next->prev = NULL;
44142 + } else {
44143 + match->prev->next = match->next;
44144 + if (match->next != NULL)
44145 + match->next->prev = match->prev;
44146 + }
44147 + match->prev = NULL;
44148 + match->next = NULL;
44149 + match->inode = newinode;
44150 + match->device = newdevice;
44151 + match->mode &= ~GR_DELETED;
44152 +
44153 + insert_acl_obj_label(match, subj);
44154 + }
44155 +
44156 + return;
44157 +}
44158 +
44159 +static void
44160 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44161 + const ino_t newinode, const dev_t newdevice,
44162 + struct acl_role_label *role)
44163 +{
44164 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44165 + struct acl_subject_label *match;
44166 +
44167 + match = role->subj_hash[index];
44168 +
44169 + while (match && (match->inode != oldinode ||
44170 + match->device != olddevice ||
44171 + !(match->mode & GR_DELETED)))
44172 + match = match->next;
44173 +
44174 + if (match && (match->inode == oldinode)
44175 + && (match->device == olddevice)
44176 + && (match->mode & GR_DELETED)) {
44177 + if (match->prev == NULL) {
44178 + role->subj_hash[index] = match->next;
44179 + if (match->next != NULL)
44180 + match->next->prev = NULL;
44181 + } else {
44182 + match->prev->next = match->next;
44183 + if (match->next != NULL)
44184 + match->next->prev = match->prev;
44185 + }
44186 + match->prev = NULL;
44187 + match->next = NULL;
44188 + match->inode = newinode;
44189 + match->device = newdevice;
44190 + match->mode &= ~GR_DELETED;
44191 +
44192 + insert_acl_subj_label(match, role);
44193 + }
44194 +
44195 + return;
44196 +}
44197 +
44198 +static void
44199 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44200 + const ino_t newinode, const dev_t newdevice)
44201 +{
44202 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44203 + struct inodev_entry *match;
44204 +
44205 + match = inodev_set.i_hash[index];
44206 +
44207 + while (match && (match->nentry->inode != oldinode ||
44208 + match->nentry->device != olddevice || !match->nentry->deleted))
44209 + match = match->next;
44210 +
44211 + if (match && (match->nentry->inode == oldinode)
44212 + && (match->nentry->device == olddevice) &&
44213 + match->nentry->deleted) {
44214 + if (match->prev == NULL) {
44215 + inodev_set.i_hash[index] = match->next;
44216 + if (match->next != NULL)
44217 + match->next->prev = NULL;
44218 + } else {
44219 + match->prev->next = match->next;
44220 + if (match->next != NULL)
44221 + match->next->prev = match->prev;
44222 + }
44223 + match->prev = NULL;
44224 + match->next = NULL;
44225 + match->nentry->inode = newinode;
44226 + match->nentry->device = newdevice;
44227 + match->nentry->deleted = 0;
44228 +
44229 + insert_inodev_entry(match);
44230 + }
44231 +
44232 + return;
44233 +}
44234 +
44235 +static void
44236 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44237 + const struct vfsmount *mnt)
44238 +{
44239 + struct acl_subject_label *subj;
44240 + struct acl_role_label *role;
44241 + unsigned int x;
44242 + ino_t ino = dentry->d_inode->i_ino;
44243 + dev_t dev = __get_dev(dentry);
44244 +
44245 + FOR_EACH_ROLE_START(role)
44246 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44247 +
44248 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44249 + if ((subj->inode == ino) && (subj->device == dev)) {
44250 + subj->inode = ino;
44251 + subj->device = dev;
44252 + }
44253 + FOR_EACH_NESTED_SUBJECT_END(subj)
44254 + FOR_EACH_SUBJECT_START(role, subj, x)
44255 + update_acl_obj_label(matchn->inode, matchn->device,
44256 + ino, dev, subj);
44257 + FOR_EACH_SUBJECT_END(subj,x)
44258 + FOR_EACH_ROLE_END(role)
44259 +
44260 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44261 +
44262 + return;
44263 +}
44264 +
44265 +void
44266 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44267 +{
44268 + struct name_entry *matchn;
44269 +
44270 + if (unlikely(!(gr_status & GR_READY)))
44271 + return;
44272 +
44273 + preempt_disable();
44274 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44275 +
44276 + if (unlikely((unsigned long)matchn)) {
44277 + write_lock(&gr_inode_lock);
44278 + do_handle_create(matchn, dentry, mnt);
44279 + write_unlock(&gr_inode_lock);
44280 + }
44281 + preempt_enable();
44282 +
44283 + return;
44284 +}
44285 +
44286 +void
44287 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44288 + struct dentry *old_dentry,
44289 + struct dentry *new_dentry,
44290 + struct vfsmount *mnt, const __u8 replace)
44291 +{
44292 + struct name_entry *matchn;
44293 + struct inodev_entry *inodev;
44294 + ino_t old_ino = old_dentry->d_inode->i_ino;
44295 + dev_t old_dev = __get_dev(old_dentry);
44296 +
44297 + /* vfs_rename swaps the name and parent link for old_dentry and
44298 + new_dentry
44299 + at this point, old_dentry has the new name, parent link, and inode
44300 + for the renamed file
44301 + if a file is being replaced by a rename, new_dentry has the inode
44302 + and name for the replaced file
44303 + */
44304 +
44305 + if (unlikely(!(gr_status & GR_READY)))
44306 + return;
44307 +
44308 + preempt_disable();
44309 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44310 +
44311 + /* we wouldn't have to check d_inode if it weren't for
44312 + NFS silly-renaming
44313 + */
44314 +
44315 + write_lock(&gr_inode_lock);
44316 + if (unlikely(replace && new_dentry->d_inode)) {
44317 + ino_t new_ino = new_dentry->d_inode->i_ino;
44318 + dev_t new_dev = __get_dev(new_dentry);
44319 +
44320 + inodev = lookup_inodev_entry(new_ino, new_dev);
44321 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44322 + do_handle_delete(inodev, new_ino, new_dev);
44323 + }
44324 +
44325 + inodev = lookup_inodev_entry(old_ino, old_dev);
44326 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44327 + do_handle_delete(inodev, old_ino, old_dev);
44328 +
44329 + if (unlikely((unsigned long)matchn))
44330 + do_handle_create(matchn, old_dentry, mnt);
44331 +
44332 + write_unlock(&gr_inode_lock);
44333 + preempt_enable();
44334 +
44335 + return;
44336 +}
44337 +
44338 +static int
44339 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44340 + unsigned char **sum)
44341 +{
44342 + struct acl_role_label *r;
44343 + struct role_allowed_ip *ipp;
44344 + struct role_transition *trans;
44345 + unsigned int i;
44346 + int found = 0;
44347 + u32 curr_ip = current->signal->curr_ip;
44348 +
44349 + current->signal->saved_ip = curr_ip;
44350 +
44351 + /* check transition table */
44352 +
44353 + for (trans = current->role->transitions; trans; trans = trans->next) {
44354 + if (!strcmp(rolename, trans->rolename)) {
44355 + found = 1;
44356 + break;
44357 + }
44358 + }
44359 +
44360 + if (!found)
44361 + return 0;
44362 +
44363 + /* handle special roles that do not require authentication
44364 + and check ip */
44365 +
44366 + FOR_EACH_ROLE_START(r)
44367 + if (!strcmp(rolename, r->rolename) &&
44368 + (r->roletype & GR_ROLE_SPECIAL)) {
44369 + found = 0;
44370 + if (r->allowed_ips != NULL) {
44371 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44372 + if ((ntohl(curr_ip) & ipp->netmask) ==
44373 + (ntohl(ipp->addr) & ipp->netmask))
44374 + found = 1;
44375 + }
44376 + } else
44377 + found = 2;
44378 + if (!found)
44379 + return 0;
44380 +
44381 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44382 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44383 + *salt = NULL;
44384 + *sum = NULL;
44385 + return 1;
44386 + }
44387 + }
44388 + FOR_EACH_ROLE_END(r)
44389 +
44390 + for (i = 0; i < num_sprole_pws; i++) {
44391 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44392 + *salt = acl_special_roles[i]->salt;
44393 + *sum = acl_special_roles[i]->sum;
44394 + return 1;
44395 + }
44396 + }
44397 +
44398 + return 0;
44399 +}
44400 +
44401 +static void
44402 +assign_special_role(char *rolename)
44403 +{
44404 + struct acl_object_label *obj;
44405 + struct acl_role_label *r;
44406 + struct acl_role_label *assigned = NULL;
44407 + struct task_struct *tsk;
44408 + struct file *filp;
44409 +
44410 + FOR_EACH_ROLE_START(r)
44411 + if (!strcmp(rolename, r->rolename) &&
44412 + (r->roletype & GR_ROLE_SPECIAL)) {
44413 + assigned = r;
44414 + break;
44415 + }
44416 + FOR_EACH_ROLE_END(r)
44417 +
44418 + if (!assigned)
44419 + return;
44420 +
44421 + read_lock(&tasklist_lock);
44422 + read_lock(&grsec_exec_file_lock);
44423 +
44424 + tsk = current->real_parent;
44425 + if (tsk == NULL)
44426 + goto out_unlock;
44427 +
44428 + filp = tsk->exec_file;
44429 + if (filp == NULL)
44430 + goto out_unlock;
44431 +
44432 + tsk->is_writable = 0;
44433 +
44434 + tsk->acl_sp_role = 1;
44435 + tsk->acl_role_id = ++acl_sp_role_value;
44436 + tsk->role = assigned;
44437 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44438 +
44439 + /* ignore additional mmap checks for processes that are writable
44440 + by the default ACL */
44441 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44442 + if (unlikely(obj->mode & GR_WRITE))
44443 + tsk->is_writable = 1;
44444 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44445 + if (unlikely(obj->mode & GR_WRITE))
44446 + tsk->is_writable = 1;
44447 +
44448 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44449 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44450 +#endif
44451 +
44452 +out_unlock:
44453 + read_unlock(&grsec_exec_file_lock);
44454 + read_unlock(&tasklist_lock);
44455 + return;
44456 +}
44457 +
44458 +int gr_check_secure_terminal(struct task_struct *task)
44459 +{
44460 + struct task_struct *p, *p2, *p3;
44461 + struct files_struct *files;
44462 + struct fdtable *fdt;
44463 + struct file *our_file = NULL, *file;
44464 + int i;
44465 +
44466 + if (task->signal->tty == NULL)
44467 + return 1;
44468 +
44469 + files = get_files_struct(task);
44470 + if (files != NULL) {
44471 + rcu_read_lock();
44472 + fdt = files_fdtable(files);
44473 + for (i=0; i < fdt->max_fds; i++) {
44474 + file = fcheck_files(files, i);
44475 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44476 + get_file(file);
44477 + our_file = file;
44478 + }
44479 + }
44480 + rcu_read_unlock();
44481 + put_files_struct(files);
44482 + }
44483 +
44484 + if (our_file == NULL)
44485 + return 1;
44486 +
44487 + read_lock(&tasklist_lock);
44488 + do_each_thread(p2, p) {
44489 + files = get_files_struct(p);
44490 + if (files == NULL ||
44491 + (p->signal && p->signal->tty == task->signal->tty)) {
44492 + if (files != NULL)
44493 + put_files_struct(files);
44494 + continue;
44495 + }
44496 + rcu_read_lock();
44497 + fdt = files_fdtable(files);
44498 + for (i=0; i < fdt->max_fds; i++) {
44499 + file = fcheck_files(files, i);
44500 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44501 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44502 + p3 = task;
44503 + while (p3->pid > 0) {
44504 + if (p3 == p)
44505 + break;
44506 + p3 = p3->real_parent;
44507 + }
44508 + if (p3 == p)
44509 + break;
44510 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44511 + gr_handle_alertkill(p);
44512 + rcu_read_unlock();
44513 + put_files_struct(files);
44514 + read_unlock(&tasklist_lock);
44515 + fput(our_file);
44516 + return 0;
44517 + }
44518 + }
44519 + rcu_read_unlock();
44520 + put_files_struct(files);
44521 + } while_each_thread(p2, p);
44522 + read_unlock(&tasklist_lock);
44523 +
44524 + fput(our_file);
44525 + return 1;
44526 +}
44527 +
44528 +ssize_t
44529 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44530 +{
44531 + struct gr_arg_wrapper uwrap;
44532 + unsigned char *sprole_salt = NULL;
44533 + unsigned char *sprole_sum = NULL;
44534 + int error = sizeof (struct gr_arg_wrapper);
44535 + int error2 = 0;
44536 +
44537 + mutex_lock(&gr_dev_mutex);
44538 +
44539 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44540 + error = -EPERM;
44541 + goto out;
44542 + }
44543 +
44544 + if (count != sizeof (struct gr_arg_wrapper)) {
44545 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44546 + error = -EINVAL;
44547 + goto out;
44548 + }
44549 +
44550 +
44551 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44552 + gr_auth_expires = 0;
44553 + gr_auth_attempts = 0;
44554 + }
44555 +
44556 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44557 + error = -EFAULT;
44558 + goto out;
44559 + }
44560 +
44561 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44562 + error = -EINVAL;
44563 + goto out;
44564 + }
44565 +
44566 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44567 + error = -EFAULT;
44568 + goto out;
44569 + }
44570 +
44571 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44572 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44573 + time_after(gr_auth_expires, get_seconds())) {
44574 + error = -EBUSY;
44575 + goto out;
44576 + }
44577 +
44578 + /* if non-root trying to do anything other than use a special role,
44579 + do not attempt authentication, do not count towards authentication
44580 + locking
44581 + */
44582 +
44583 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44584 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44585 + current_uid()) {
44586 + error = -EPERM;
44587 + goto out;
44588 + }
44589 +
44590 + /* ensure pw and special role name are null terminated */
44591 +
44592 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44593 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44594 +
44595 + /* Okay.
44596 + * We have our enough of the argument structure..(we have yet
44597 + * to copy_from_user the tables themselves) . Copy the tables
44598 + * only if we need them, i.e. for loading operations. */
44599 +
44600 + switch (gr_usermode->mode) {
44601 + case GR_STATUS:
44602 + if (gr_status & GR_READY) {
44603 + error = 1;
44604 + if (!gr_check_secure_terminal(current))
44605 + error = 3;
44606 + } else
44607 + error = 2;
44608 + goto out;
44609 + case GR_SHUTDOWN:
44610 + if ((gr_status & GR_READY)
44611 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44612 + pax_open_kernel();
44613 + gr_status &= ~GR_READY;
44614 + pax_close_kernel();
44615 +
44616 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44617 + free_variables();
44618 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44619 + memset(gr_system_salt, 0, GR_SALT_LEN);
44620 + memset(gr_system_sum, 0, GR_SHA_LEN);
44621 + } else if (gr_status & GR_READY) {
44622 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44623 + error = -EPERM;
44624 + } else {
44625 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44626 + error = -EAGAIN;
44627 + }
44628 + break;
44629 + case GR_ENABLE:
44630 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44631 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44632 + else {
44633 + if (gr_status & GR_READY)
44634 + error = -EAGAIN;
44635 + else
44636 + error = error2;
44637 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44638 + }
44639 + break;
44640 + case GR_RELOAD:
44641 + if (!(gr_status & GR_READY)) {
44642 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44643 + error = -EAGAIN;
44644 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44645 + preempt_disable();
44646 +
44647 + pax_open_kernel();
44648 + gr_status &= ~GR_READY;
44649 + pax_close_kernel();
44650 +
44651 + free_variables();
44652 + if (!(error2 = gracl_init(gr_usermode))) {
44653 + preempt_enable();
44654 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44655 + } else {
44656 + preempt_enable();
44657 + error = error2;
44658 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44659 + }
44660 + } else {
44661 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44662 + error = -EPERM;
44663 + }
44664 + break;
44665 + case GR_SEGVMOD:
44666 + if (unlikely(!(gr_status & GR_READY))) {
44667 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44668 + error = -EAGAIN;
44669 + break;
44670 + }
44671 +
44672 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44673 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44674 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44675 + struct acl_subject_label *segvacl;
44676 + segvacl =
44677 + lookup_acl_subj_label(gr_usermode->segv_inode,
44678 + gr_usermode->segv_device,
44679 + current->role);
44680 + if (segvacl) {
44681 + segvacl->crashes = 0;
44682 + segvacl->expires = 0;
44683 + }
44684 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44685 + gr_remove_uid(gr_usermode->segv_uid);
44686 + }
44687 + } else {
44688 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44689 + error = -EPERM;
44690 + }
44691 + break;
44692 + case GR_SPROLE:
44693 + case GR_SPROLEPAM:
44694 + if (unlikely(!(gr_status & GR_READY))) {
44695 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44696 + error = -EAGAIN;
44697 + break;
44698 + }
44699 +
44700 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44701 + current->role->expires = 0;
44702 + current->role->auth_attempts = 0;
44703 + }
44704 +
44705 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44706 + time_after(current->role->expires, get_seconds())) {
44707 + error = -EBUSY;
44708 + goto out;
44709 + }
44710 +
44711 + if (lookup_special_role_auth
44712 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44713 + && ((!sprole_salt && !sprole_sum)
44714 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44715 + char *p = "";
44716 + assign_special_role(gr_usermode->sp_role);
44717 + read_lock(&tasklist_lock);
44718 + if (current->real_parent)
44719 + p = current->real_parent->role->rolename;
44720 + read_unlock(&tasklist_lock);
44721 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44722 + p, acl_sp_role_value);
44723 + } else {
44724 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44725 + error = -EPERM;
44726 + if(!(current->role->auth_attempts++))
44727 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44728 +
44729 + goto out;
44730 + }
44731 + break;
44732 + case GR_UNSPROLE:
44733 + if (unlikely(!(gr_status & GR_READY))) {
44734 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44735 + error = -EAGAIN;
44736 + break;
44737 + }
44738 +
44739 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44740 + char *p = "";
44741 + int i = 0;
44742 +
44743 + read_lock(&tasklist_lock);
44744 + if (current->real_parent) {
44745 + p = current->real_parent->role->rolename;
44746 + i = current->real_parent->acl_role_id;
44747 + }
44748 + read_unlock(&tasklist_lock);
44749 +
44750 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44751 + gr_set_acls(1);
44752 + } else {
44753 + error = -EPERM;
44754 + goto out;
44755 + }
44756 + break;
44757 + default:
44758 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44759 + error = -EINVAL;
44760 + break;
44761 + }
44762 +
44763 + if (error != -EPERM)
44764 + goto out;
44765 +
44766 + if(!(gr_auth_attempts++))
44767 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44768 +
44769 + out:
44770 + mutex_unlock(&gr_dev_mutex);
44771 + return error;
44772 +}
44773 +
44774 +/* must be called with
44775 + rcu_read_lock();
44776 + read_lock(&tasklist_lock);
44777 + read_lock(&grsec_exec_file_lock);
44778 +*/
44779 +int gr_apply_subject_to_task(struct task_struct *task)
44780 +{
44781 + struct acl_object_label *obj;
44782 + char *tmpname;
44783 + struct acl_subject_label *tmpsubj;
44784 + struct file *filp;
44785 + struct name_entry *nmatch;
44786 +
44787 + filp = task->exec_file;
44788 + if (filp == NULL)
44789 + return 0;
44790 +
44791 + /* the following is to apply the correct subject
44792 + on binaries running when the RBAC system
44793 + is enabled, when the binaries have been
44794 + replaced or deleted since their execution
44795 + -----
44796 + when the RBAC system starts, the inode/dev
44797 + from exec_file will be one the RBAC system
44798 + is unaware of. It only knows the inode/dev
44799 + of the present file on disk, or the absence
44800 + of it.
44801 + */
44802 + preempt_disable();
44803 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44804 +
44805 + nmatch = lookup_name_entry(tmpname);
44806 + preempt_enable();
44807 + tmpsubj = NULL;
44808 + if (nmatch) {
44809 + if (nmatch->deleted)
44810 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44811 + else
44812 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44813 + if (tmpsubj != NULL)
44814 + task->acl = tmpsubj;
44815 + }
44816 + if (tmpsubj == NULL)
44817 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44818 + task->role);
44819 + if (task->acl) {
44820 + task->is_writable = 0;
44821 + /* ignore additional mmap checks for processes that are writable
44822 + by the default ACL */
44823 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44824 + if (unlikely(obj->mode & GR_WRITE))
44825 + task->is_writable = 1;
44826 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44827 + if (unlikely(obj->mode & GR_WRITE))
44828 + task->is_writable = 1;
44829 +
44830 + gr_set_proc_res(task);
44831 +
44832 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44833 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44834 +#endif
44835 + } else {
44836 + return 1;
44837 + }
44838 +
44839 + return 0;
44840 +}
44841 +
44842 +int
44843 +gr_set_acls(const int type)
44844 +{
44845 + struct task_struct *task, *task2;
44846 + struct acl_role_label *role = current->role;
44847 + __u16 acl_role_id = current->acl_role_id;
44848 + const struct cred *cred;
44849 + int ret;
44850 +
44851 + rcu_read_lock();
44852 + read_lock(&tasklist_lock);
44853 + read_lock(&grsec_exec_file_lock);
44854 + do_each_thread(task2, task) {
44855 + /* check to see if we're called from the exit handler,
44856 + if so, only replace ACLs that have inherited the admin
44857 + ACL */
44858 +
44859 + if (type && (task->role != role ||
44860 + task->acl_role_id != acl_role_id))
44861 + continue;
44862 +
44863 + task->acl_role_id = 0;
44864 + task->acl_sp_role = 0;
44865 +
44866 + if (task->exec_file) {
44867 + cred = __task_cred(task);
44868 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44869 + ret = gr_apply_subject_to_task(task);
44870 + if (ret) {
44871 + read_unlock(&grsec_exec_file_lock);
44872 + read_unlock(&tasklist_lock);
44873 + rcu_read_unlock();
44874 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44875 + return ret;
44876 + }
44877 + } else {
44878 + // it's a kernel process
44879 + task->role = kernel_role;
44880 + task->acl = kernel_role->root_label;
44881 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44882 + task->acl->mode &= ~GR_PROCFIND;
44883 +#endif
44884 + }
44885 + } while_each_thread(task2, task);
44886 + read_unlock(&grsec_exec_file_lock);
44887 + read_unlock(&tasklist_lock);
44888 + rcu_read_unlock();
44889 +
44890 + return 0;
44891 +}
44892 +
44893 +void
44894 +gr_learn_resource(const struct task_struct *task,
44895 + const int res, const unsigned long wanted, const int gt)
44896 +{
44897 + struct acl_subject_label *acl;
44898 + const struct cred *cred;
44899 +
44900 + if (unlikely((gr_status & GR_READY) &&
44901 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44902 + goto skip_reslog;
44903 +
44904 +#ifdef CONFIG_GRKERNSEC_RESLOG
44905 + gr_log_resource(task, res, wanted, gt);
44906 +#endif
44907 + skip_reslog:
44908 +
44909 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44910 + return;
44911 +
44912 + acl = task->acl;
44913 +
44914 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44915 + !(acl->resmask & (1 << (unsigned short) res))))
44916 + return;
44917 +
44918 + if (wanted >= acl->res[res].rlim_cur) {
44919 + unsigned long res_add;
44920 +
44921 + res_add = wanted;
44922 + switch (res) {
44923 + case RLIMIT_CPU:
44924 + res_add += GR_RLIM_CPU_BUMP;
44925 + break;
44926 + case RLIMIT_FSIZE:
44927 + res_add += GR_RLIM_FSIZE_BUMP;
44928 + break;
44929 + case RLIMIT_DATA:
44930 + res_add += GR_RLIM_DATA_BUMP;
44931 + break;
44932 + case RLIMIT_STACK:
44933 + res_add += GR_RLIM_STACK_BUMP;
44934 + break;
44935 + case RLIMIT_CORE:
44936 + res_add += GR_RLIM_CORE_BUMP;
44937 + break;
44938 + case RLIMIT_RSS:
44939 + res_add += GR_RLIM_RSS_BUMP;
44940 + break;
44941 + case RLIMIT_NPROC:
44942 + res_add += GR_RLIM_NPROC_BUMP;
44943 + break;
44944 + case RLIMIT_NOFILE:
44945 + res_add += GR_RLIM_NOFILE_BUMP;
44946 + break;
44947 + case RLIMIT_MEMLOCK:
44948 + res_add += GR_RLIM_MEMLOCK_BUMP;
44949 + break;
44950 + case RLIMIT_AS:
44951 + res_add += GR_RLIM_AS_BUMP;
44952 + break;
44953 + case RLIMIT_LOCKS:
44954 + res_add += GR_RLIM_LOCKS_BUMP;
44955 + break;
44956 + case RLIMIT_SIGPENDING:
44957 + res_add += GR_RLIM_SIGPENDING_BUMP;
44958 + break;
44959 + case RLIMIT_MSGQUEUE:
44960 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44961 + break;
44962 + case RLIMIT_NICE:
44963 + res_add += GR_RLIM_NICE_BUMP;
44964 + break;
44965 + case RLIMIT_RTPRIO:
44966 + res_add += GR_RLIM_RTPRIO_BUMP;
44967 + break;
44968 + case RLIMIT_RTTIME:
44969 + res_add += GR_RLIM_RTTIME_BUMP;
44970 + break;
44971 + }
44972 +
44973 + acl->res[res].rlim_cur = res_add;
44974 +
44975 + if (wanted > acl->res[res].rlim_max)
44976 + acl->res[res].rlim_max = res_add;
44977 +
44978 + /* only log the subject filename, since resource logging is supported for
44979 + single-subject learning only */
44980 + rcu_read_lock();
44981 + cred = __task_cred(task);
44982 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44983 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44984 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44985 + "", (unsigned long) res, &task->signal->saved_ip);
44986 + rcu_read_unlock();
44987 + }
44988 +
44989 + return;
44990 +}
44991 +
44992 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44993 +void
44994 +pax_set_initial_flags(struct linux_binprm *bprm)
44995 +{
44996 + struct task_struct *task = current;
44997 + struct acl_subject_label *proc;
44998 + unsigned long flags;
44999 +
45000 + if (unlikely(!(gr_status & GR_READY)))
45001 + return;
45002 +
45003 + flags = pax_get_flags(task);
45004 +
45005 + proc = task->acl;
45006 +
45007 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
45008 + flags &= ~MF_PAX_PAGEEXEC;
45009 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
45010 + flags &= ~MF_PAX_SEGMEXEC;
45011 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
45012 + flags &= ~MF_PAX_RANDMMAP;
45013 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
45014 + flags &= ~MF_PAX_EMUTRAMP;
45015 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
45016 + flags &= ~MF_PAX_MPROTECT;
45017 +
45018 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
45019 + flags |= MF_PAX_PAGEEXEC;
45020 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
45021 + flags |= MF_PAX_SEGMEXEC;
45022 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
45023 + flags |= MF_PAX_RANDMMAP;
45024 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
45025 + flags |= MF_PAX_EMUTRAMP;
45026 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
45027 + flags |= MF_PAX_MPROTECT;
45028 +
45029 + pax_set_flags(task, flags);
45030 +
45031 + return;
45032 +}
45033 +#endif
45034 +
45035 +#ifdef CONFIG_SYSCTL
45036 +/* Eric Biederman likes breaking userland ABI and every inode-based security
45037 + system to save 35kb of memory */
45038 +
45039 +/* we modify the passed in filename, but adjust it back before returning */
45040 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
45041 +{
45042 + struct name_entry *nmatch;
45043 + char *p, *lastp = NULL;
45044 + struct acl_object_label *obj = NULL, *tmp;
45045 + struct acl_subject_label *tmpsubj;
45046 + char c = '\0';
45047 +
45048 + read_lock(&gr_inode_lock);
45049 +
45050 + p = name + len - 1;
45051 + do {
45052 + nmatch = lookup_name_entry(name);
45053 + if (lastp != NULL)
45054 + *lastp = c;
45055 +
45056 + if (nmatch == NULL)
45057 + goto next_component;
45058 + tmpsubj = current->acl;
45059 + do {
45060 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
45061 + if (obj != NULL) {
45062 + tmp = obj->globbed;
45063 + while (tmp) {
45064 + if (!glob_match(tmp->filename, name)) {
45065 + obj = tmp;
45066 + goto found_obj;
45067 + }
45068 + tmp = tmp->next;
45069 + }
45070 + goto found_obj;
45071 + }
45072 + } while ((tmpsubj = tmpsubj->parent_subject));
45073 +next_component:
45074 + /* end case */
45075 + if (p == name)
45076 + break;
45077 +
45078 + while (*p != '/')
45079 + p--;
45080 + if (p == name)
45081 + lastp = p + 1;
45082 + else {
45083 + lastp = p;
45084 + p--;
45085 + }
45086 + c = *lastp;
45087 + *lastp = '\0';
45088 + } while (1);
45089 +found_obj:
45090 + read_unlock(&gr_inode_lock);
45091 + /* obj returned will always be non-null */
45092 + return obj;
45093 +}
45094 +
45095 +/* returns 0 when allowing, non-zero on error
45096 + op of 0 is used for readdir, so we don't log the names of hidden files
45097 +*/
45098 +__u32
45099 +gr_handle_sysctl(const struct ctl_table *table, const int op)
45100 +{
45101 + struct ctl_table *tmp;
45102 + const char *proc_sys = "/proc/sys";
45103 + char *path;
45104 + struct acl_object_label *obj;
45105 + unsigned short len = 0, pos = 0, depth = 0, i;
45106 + __u32 err = 0;
45107 + __u32 mode = 0;
45108 +
45109 + if (unlikely(!(gr_status & GR_READY)))
45110 + return 0;
45111 +
45112 + /* for now, ignore operations on non-sysctl entries if it's not a
45113 + readdir*/
45114 + if (table->child != NULL && op != 0)
45115 + return 0;
45116 +
45117 + mode |= GR_FIND;
45118 + /* it's only a read if it's an entry, read on dirs is for readdir */
45119 + if (op & MAY_READ)
45120 + mode |= GR_READ;
45121 + if (op & MAY_WRITE)
45122 + mode |= GR_WRITE;
45123 +
45124 + preempt_disable();
45125 +
45126 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45127 +
45128 + /* it's only a read/write if it's an actual entry, not a dir
45129 + (which are opened for readdir)
45130 + */
45131 +
45132 + /* convert the requested sysctl entry into a pathname */
45133 +
45134 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45135 + len += strlen(tmp->procname);
45136 + len++;
45137 + depth++;
45138 + }
45139 +
45140 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45141 + /* deny */
45142 + goto out;
45143 + }
45144 +
45145 + memset(path, 0, PAGE_SIZE);
45146 +
45147 + memcpy(path, proc_sys, strlen(proc_sys));
45148 +
45149 + pos += strlen(proc_sys);
45150 +
45151 + for (; depth > 0; depth--) {
45152 + path[pos] = '/';
45153 + pos++;
45154 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45155 + if (depth == i) {
45156 + memcpy(path + pos, tmp->procname,
45157 + strlen(tmp->procname));
45158 + pos += strlen(tmp->procname);
45159 + }
45160 + i++;
45161 + }
45162 + }
45163 +
45164 + obj = gr_lookup_by_name(path, pos);
45165 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45166 +
45167 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45168 + ((err & mode) != mode))) {
45169 + __u32 new_mode = mode;
45170 +
45171 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45172 +
45173 + err = 0;
45174 + gr_log_learn_sysctl(path, new_mode);
45175 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45176 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45177 + err = -ENOENT;
45178 + } else if (!(err & GR_FIND)) {
45179 + err = -ENOENT;
45180 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45181 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45182 + path, (mode & GR_READ) ? " reading" : "",
45183 + (mode & GR_WRITE) ? " writing" : "");
45184 + err = -EACCES;
45185 + } else if ((err & mode) != mode) {
45186 + err = -EACCES;
45187 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45188 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45189 + path, (mode & GR_READ) ? " reading" : "",
45190 + (mode & GR_WRITE) ? " writing" : "");
45191 + err = 0;
45192 + } else
45193 + err = 0;
45194 +
45195 + out:
45196 + preempt_enable();
45197 +
45198 + return err;
45199 +}
45200 +#endif
45201 +
45202 +int
45203 +gr_handle_proc_ptrace(struct task_struct *task)
45204 +{
45205 + struct file *filp;
45206 + struct task_struct *tmp = task;
45207 + struct task_struct *curtemp = current;
45208 + __u32 retmode;
45209 +
45210 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45211 + if (unlikely(!(gr_status & GR_READY)))
45212 + return 0;
45213 +#endif
45214 +
45215 + read_lock(&tasklist_lock);
45216 + read_lock(&grsec_exec_file_lock);
45217 + filp = task->exec_file;
45218 +
45219 + while (tmp->pid > 0) {
45220 + if (tmp == curtemp)
45221 + break;
45222 + tmp = tmp->real_parent;
45223 + }
45224 +
45225 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45226 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45227 + read_unlock(&grsec_exec_file_lock);
45228 + read_unlock(&tasklist_lock);
45229 + return 1;
45230 + }
45231 +
45232 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45233 + if (!(gr_status & GR_READY)) {
45234 + read_unlock(&grsec_exec_file_lock);
45235 + read_unlock(&tasklist_lock);
45236 + return 0;
45237 + }
45238 +#endif
45239 +
45240 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45241 + read_unlock(&grsec_exec_file_lock);
45242 + read_unlock(&tasklist_lock);
45243 +
45244 + if (retmode & GR_NOPTRACE)
45245 + return 1;
45246 +
45247 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45248 + && (current->acl != task->acl || (current->acl != current->role->root_label
45249 + && current->pid != task->pid)))
45250 + return 1;
45251 +
45252 + return 0;
45253 +}
45254 +
45255 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45256 +{
45257 + if (unlikely(!(gr_status & GR_READY)))
45258 + return;
45259 +
45260 + if (!(current->role->roletype & GR_ROLE_GOD))
45261 + return;
45262 +
45263 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45264 + p->role->rolename, gr_task_roletype_to_char(p),
45265 + p->acl->filename);
45266 +}
45267 +
45268 +int
45269 +gr_handle_ptrace(struct task_struct *task, const long request)
45270 +{
45271 + struct task_struct *tmp = task;
45272 + struct task_struct *curtemp = current;
45273 + __u32 retmode;
45274 +
45275 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45276 + if (unlikely(!(gr_status & GR_READY)))
45277 + return 0;
45278 +#endif
45279 +
45280 + read_lock(&tasklist_lock);
45281 + while (tmp->pid > 0) {
45282 + if (tmp == curtemp)
45283 + break;
45284 + tmp = tmp->real_parent;
45285 + }
45286 +
45287 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45288 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45289 + read_unlock(&tasklist_lock);
45290 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45291 + return 1;
45292 + }
45293 + read_unlock(&tasklist_lock);
45294 +
45295 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45296 + if (!(gr_status & GR_READY))
45297 + return 0;
45298 +#endif
45299 +
45300 + read_lock(&grsec_exec_file_lock);
45301 + if (unlikely(!task->exec_file)) {
45302 + read_unlock(&grsec_exec_file_lock);
45303 + return 0;
45304 + }
45305 +
45306 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45307 + read_unlock(&grsec_exec_file_lock);
45308 +
45309 + if (retmode & GR_NOPTRACE) {
45310 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45311 + return 1;
45312 + }
45313 +
45314 + if (retmode & GR_PTRACERD) {
45315 + switch (request) {
45316 + case PTRACE_POKETEXT:
45317 + case PTRACE_POKEDATA:
45318 + case PTRACE_POKEUSR:
45319 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45320 + case PTRACE_SETREGS:
45321 + case PTRACE_SETFPREGS:
45322 +#endif
45323 +#ifdef CONFIG_X86
45324 + case PTRACE_SETFPXREGS:
45325 +#endif
45326 +#ifdef CONFIG_ALTIVEC
45327 + case PTRACE_SETVRREGS:
45328 +#endif
45329 + return 1;
45330 + default:
45331 + return 0;
45332 + }
45333 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
45334 + !(current->role->roletype & GR_ROLE_GOD) &&
45335 + (current->acl != task->acl)) {
45336 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45337 + return 1;
45338 + }
45339 +
45340 + return 0;
45341 +}
45342 +
45343 +static int is_writable_mmap(const struct file *filp)
45344 +{
45345 + struct task_struct *task = current;
45346 + struct acl_object_label *obj, *obj2;
45347 +
45348 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45349 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45350 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45351 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45352 + task->role->root_label);
45353 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45354 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45355 + return 1;
45356 + }
45357 + }
45358 + return 0;
45359 +}
45360 +
45361 +int
45362 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45363 +{
45364 + __u32 mode;
45365 +
45366 + if (unlikely(!file || !(prot & PROT_EXEC)))
45367 + return 1;
45368 +
45369 + if (is_writable_mmap(file))
45370 + return 0;
45371 +
45372 + mode =
45373 + gr_search_file(file->f_path.dentry,
45374 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45375 + file->f_path.mnt);
45376 +
45377 + if (!gr_tpe_allow(file))
45378 + return 0;
45379 +
45380 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45381 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45382 + return 0;
45383 + } else if (unlikely(!(mode & GR_EXEC))) {
45384 + return 0;
45385 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45386 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45387 + return 1;
45388 + }
45389 +
45390 + return 1;
45391 +}
45392 +
45393 +int
45394 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45395 +{
45396 + __u32 mode;
45397 +
45398 + if (unlikely(!file || !(prot & PROT_EXEC)))
45399 + return 1;
45400 +
45401 + if (is_writable_mmap(file))
45402 + return 0;
45403 +
45404 + mode =
45405 + gr_search_file(file->f_path.dentry,
45406 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45407 + file->f_path.mnt);
45408 +
45409 + if (!gr_tpe_allow(file))
45410 + return 0;
45411 +
45412 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45413 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45414 + return 0;
45415 + } else if (unlikely(!(mode & GR_EXEC))) {
45416 + return 0;
45417 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45418 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45419 + return 1;
45420 + }
45421 +
45422 + return 1;
45423 +}
45424 +
45425 +void
45426 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45427 +{
45428 + unsigned long runtime;
45429 + unsigned long cputime;
45430 + unsigned int wday, cday;
45431 + __u8 whr, chr;
45432 + __u8 wmin, cmin;
45433 + __u8 wsec, csec;
45434 + struct timespec timeval;
45435 +
45436 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45437 + !(task->acl->mode & GR_PROCACCT)))
45438 + return;
45439 +
45440 + do_posix_clock_monotonic_gettime(&timeval);
45441 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45442 + wday = runtime / (3600 * 24);
45443 + runtime -= wday * (3600 * 24);
45444 + whr = runtime / 3600;
45445 + runtime -= whr * 3600;
45446 + wmin = runtime / 60;
45447 + runtime -= wmin * 60;
45448 + wsec = runtime;
45449 +
45450 + cputime = (task->utime + task->stime) / HZ;
45451 + cday = cputime / (3600 * 24);
45452 + cputime -= cday * (3600 * 24);
45453 + chr = cputime / 3600;
45454 + cputime -= chr * 3600;
45455 + cmin = cputime / 60;
45456 + cputime -= cmin * 60;
45457 + csec = cputime;
45458 +
45459 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45460 +
45461 + return;
45462 +}
45463 +
45464 +void gr_set_kernel_label(struct task_struct *task)
45465 +{
45466 + if (gr_status & GR_READY) {
45467 + task->role = kernel_role;
45468 + task->acl = kernel_role->root_label;
45469 + }
45470 + return;
45471 +}
45472 +
45473 +#ifdef CONFIG_TASKSTATS
45474 +int gr_is_taskstats_denied(int pid)
45475 +{
45476 + struct task_struct *task;
45477 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45478 + const struct cred *cred;
45479 +#endif
45480 + int ret = 0;
45481 +
45482 + /* restrict taskstats viewing to un-chrooted root users
45483 + who have the 'view' subject flag if the RBAC system is enabled
45484 + */
45485 +
45486 + rcu_read_lock();
45487 + read_lock(&tasklist_lock);
45488 + task = find_task_by_vpid(pid);
45489 + if (task) {
45490 +#ifdef CONFIG_GRKERNSEC_CHROOT
45491 + if (proc_is_chrooted(task))
45492 + ret = -EACCES;
45493 +#endif
45494 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45495 + cred = __task_cred(task);
45496 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45497 + if (cred->uid != 0)
45498 + ret = -EACCES;
45499 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45500 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45501 + ret = -EACCES;
45502 +#endif
45503 +#endif
45504 + if (gr_status & GR_READY) {
45505 + if (!(task->acl->mode & GR_VIEW))
45506 + ret = -EACCES;
45507 + }
45508 + } else
45509 + ret = -ENOENT;
45510 +
45511 + read_unlock(&tasklist_lock);
45512 + rcu_read_unlock();
45513 +
45514 + return ret;
45515 +}
45516 +#endif
45517 +
45518 +/* AUXV entries are filled via a descendant of search_binary_handler
45519 + after we've already applied the subject for the target
45520 +*/
45521 +int gr_acl_enable_at_secure(void)
45522 +{
45523 + if (unlikely(!(gr_status & GR_READY)))
45524 + return 0;
45525 +
45526 + if (current->acl->mode & GR_ATSECURE)
45527 + return 1;
45528 +
45529 + return 0;
45530 +}
45531 +
45532 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45533 +{
45534 + struct task_struct *task = current;
45535 + struct dentry *dentry = file->f_path.dentry;
45536 + struct vfsmount *mnt = file->f_path.mnt;
45537 + struct acl_object_label *obj, *tmp;
45538 + struct acl_subject_label *subj;
45539 + unsigned int bufsize;
45540 + int is_not_root;
45541 + char *path;
45542 + dev_t dev = __get_dev(dentry);
45543 +
45544 + if (unlikely(!(gr_status & GR_READY)))
45545 + return 1;
45546 +
45547 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45548 + return 1;
45549 +
45550 + /* ignore Eric Biederman */
45551 + if (IS_PRIVATE(dentry->d_inode))
45552 + return 1;
45553 +
45554 + subj = task->acl;
45555 + do {
45556 + obj = lookup_acl_obj_label(ino, dev, subj);
45557 + if (obj != NULL)
45558 + return (obj->mode & GR_FIND) ? 1 : 0;
45559 + } while ((subj = subj->parent_subject));
45560 +
45561 + /* this is purely an optimization since we're looking for an object
45562 + for the directory we're doing a readdir on
45563 + if it's possible for any globbed object to match the entry we're
45564 + filling into the directory, then the object we find here will be
45565 + an anchor point with attached globbed objects
45566 + */
45567 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45568 + if (obj->globbed == NULL)
45569 + return (obj->mode & GR_FIND) ? 1 : 0;
45570 +
45571 + is_not_root = ((obj->filename[0] == '/') &&
45572 + (obj->filename[1] == '\0')) ? 0 : 1;
45573 + bufsize = PAGE_SIZE - namelen - is_not_root;
45574 +
45575 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45576 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45577 + return 1;
45578 +
45579 + preempt_disable();
45580 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45581 + bufsize);
45582 +
45583 + bufsize = strlen(path);
45584 +
45585 + /* if base is "/", don't append an additional slash */
45586 + if (is_not_root)
45587 + *(path + bufsize) = '/';
45588 + memcpy(path + bufsize + is_not_root, name, namelen);
45589 + *(path + bufsize + namelen + is_not_root) = '\0';
45590 +
45591 + tmp = obj->globbed;
45592 + while (tmp) {
45593 + if (!glob_match(tmp->filename, path)) {
45594 + preempt_enable();
45595 + return (tmp->mode & GR_FIND) ? 1 : 0;
45596 + }
45597 + tmp = tmp->next;
45598 + }
45599 + preempt_enable();
45600 + return (obj->mode & GR_FIND) ? 1 : 0;
45601 +}
45602 +
45603 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45604 +EXPORT_SYMBOL(gr_acl_is_enabled);
45605 +#endif
45606 +EXPORT_SYMBOL(gr_learn_resource);
45607 +EXPORT_SYMBOL(gr_set_kernel_label);
45608 +#ifdef CONFIG_SECURITY
45609 +EXPORT_SYMBOL(gr_check_user_change);
45610 +EXPORT_SYMBOL(gr_check_group_change);
45611 +#endif
45612 +
45613 diff -urNp linux-2.6.39.4/grsecurity/gracl_cap.c linux-2.6.39.4/grsecurity/gracl_cap.c
45614 --- linux-2.6.39.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45615 +++ linux-2.6.39.4/grsecurity/gracl_cap.c 2011-08-05 19:44:37.000000000 -0400
45616 @@ -0,0 +1,139 @@
45617 +#include <linux/kernel.h>
45618 +#include <linux/module.h>
45619 +#include <linux/sched.h>
45620 +#include <linux/gracl.h>
45621 +#include <linux/grsecurity.h>
45622 +#include <linux/grinternal.h>
45623 +
45624 +static const char *captab_log[] = {
45625 + "CAP_CHOWN",
45626 + "CAP_DAC_OVERRIDE",
45627 + "CAP_DAC_READ_SEARCH",
45628 + "CAP_FOWNER",
45629 + "CAP_FSETID",
45630 + "CAP_KILL",
45631 + "CAP_SETGID",
45632 + "CAP_SETUID",
45633 + "CAP_SETPCAP",
45634 + "CAP_LINUX_IMMUTABLE",
45635 + "CAP_NET_BIND_SERVICE",
45636 + "CAP_NET_BROADCAST",
45637 + "CAP_NET_ADMIN",
45638 + "CAP_NET_RAW",
45639 + "CAP_IPC_LOCK",
45640 + "CAP_IPC_OWNER",
45641 + "CAP_SYS_MODULE",
45642 + "CAP_SYS_RAWIO",
45643 + "CAP_SYS_CHROOT",
45644 + "CAP_SYS_PTRACE",
45645 + "CAP_SYS_PACCT",
45646 + "CAP_SYS_ADMIN",
45647 + "CAP_SYS_BOOT",
45648 + "CAP_SYS_NICE",
45649 + "CAP_SYS_RESOURCE",
45650 + "CAP_SYS_TIME",
45651 + "CAP_SYS_TTY_CONFIG",
45652 + "CAP_MKNOD",
45653 + "CAP_LEASE",
45654 + "CAP_AUDIT_WRITE",
45655 + "CAP_AUDIT_CONTROL",
45656 + "CAP_SETFCAP",
45657 + "CAP_MAC_OVERRIDE",
45658 + "CAP_MAC_ADMIN",
45659 + "CAP_SYSLOG"
45660 +};
45661 +
45662 +EXPORT_SYMBOL(gr_is_capable);
45663 +EXPORT_SYMBOL(gr_is_capable_nolog);
45664 +
45665 +int
45666 +gr_is_capable(const int cap)
45667 +{
45668 + struct task_struct *task = current;
45669 + const struct cred *cred = current_cred();
45670 + struct acl_subject_label *curracl;
45671 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45672 + kernel_cap_t cap_audit = __cap_empty_set;
45673 +
45674 + if (!gr_acl_is_enabled())
45675 + return 1;
45676 +
45677 + curracl = task->acl;
45678 +
45679 + cap_drop = curracl->cap_lower;
45680 + cap_mask = curracl->cap_mask;
45681 + cap_audit = curracl->cap_invert_audit;
45682 +
45683 + while ((curracl = curracl->parent_subject)) {
45684 + /* if the cap isn't specified in the current computed mask but is specified in the
45685 + current level subject, and is lowered in the current level subject, then add
45686 + it to the set of dropped capabilities
45687 + otherwise, add the current level subject's mask to the current computed mask
45688 + */
45689 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45690 + cap_raise(cap_mask, cap);
45691 + if (cap_raised(curracl->cap_lower, cap))
45692 + cap_raise(cap_drop, cap);
45693 + if (cap_raised(curracl->cap_invert_audit, cap))
45694 + cap_raise(cap_audit, cap);
45695 + }
45696 + }
45697 +
45698 + if (!cap_raised(cap_drop, cap)) {
45699 + if (cap_raised(cap_audit, cap))
45700 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45701 + return 1;
45702 + }
45703 +
45704 + curracl = task->acl;
45705 +
45706 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45707 + && cap_raised(cred->cap_effective, cap)) {
45708 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45709 + task->role->roletype, cred->uid,
45710 + cred->gid, task->exec_file ?
45711 + gr_to_filename(task->exec_file->f_path.dentry,
45712 + task->exec_file->f_path.mnt) : curracl->filename,
45713 + curracl->filename, 0UL,
45714 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45715 + return 1;
45716 + }
45717 +
45718 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45719 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45720 + return 0;
45721 +}
45722 +
45723 +int
45724 +gr_is_capable_nolog(const int cap)
45725 +{
45726 + struct acl_subject_label *curracl;
45727 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45728 +
45729 + if (!gr_acl_is_enabled())
45730 + return 1;
45731 +
45732 + curracl = current->acl;
45733 +
45734 + cap_drop = curracl->cap_lower;
45735 + cap_mask = curracl->cap_mask;
45736 +
45737 + while ((curracl = curracl->parent_subject)) {
45738 + /* if the cap isn't specified in the current computed mask but is specified in the
45739 + current level subject, and is lowered in the current level subject, then add
45740 + it to the set of dropped capabilities
45741 + otherwise, add the current level subject's mask to the current computed mask
45742 + */
45743 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45744 + cap_raise(cap_mask, cap);
45745 + if (cap_raised(curracl->cap_lower, cap))
45746 + cap_raise(cap_drop, cap);
45747 + }
45748 + }
45749 +
45750 + if (!cap_raised(cap_drop, cap))
45751 + return 1;
45752 +
45753 + return 0;
45754 +}
45755 +
45756 diff -urNp linux-2.6.39.4/grsecurity/gracl_fs.c linux-2.6.39.4/grsecurity/gracl_fs.c
45757 --- linux-2.6.39.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45758 +++ linux-2.6.39.4/grsecurity/gracl_fs.c 2011-08-05 19:44:37.000000000 -0400
45759 @@ -0,0 +1,431 @@
45760 +#include <linux/kernel.h>
45761 +#include <linux/sched.h>
45762 +#include <linux/types.h>
45763 +#include <linux/fs.h>
45764 +#include <linux/file.h>
45765 +#include <linux/stat.h>
45766 +#include <linux/grsecurity.h>
45767 +#include <linux/grinternal.h>
45768 +#include <linux/gracl.h>
45769 +
45770 +__u32
45771 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45772 + const struct vfsmount * mnt)
45773 +{
45774 + __u32 mode;
45775 +
45776 + if (unlikely(!dentry->d_inode))
45777 + return GR_FIND;
45778 +
45779 + mode =
45780 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45781 +
45782 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45783 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45784 + return mode;
45785 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45786 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45787 + return 0;
45788 + } else if (unlikely(!(mode & GR_FIND)))
45789 + return 0;
45790 +
45791 + return GR_FIND;
45792 +}
45793 +
45794 +__u32
45795 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45796 + const int fmode)
45797 +{
45798 + __u32 reqmode = GR_FIND;
45799 + __u32 mode;
45800 +
45801 + if (unlikely(!dentry->d_inode))
45802 + return reqmode;
45803 +
45804 + if (unlikely(fmode & O_APPEND))
45805 + reqmode |= GR_APPEND;
45806 + else if (unlikely(fmode & FMODE_WRITE))
45807 + reqmode |= GR_WRITE;
45808 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45809 + reqmode |= GR_READ;
45810 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45811 + reqmode &= ~GR_READ;
45812 + mode =
45813 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45814 + mnt);
45815 +
45816 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45817 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45818 + reqmode & GR_READ ? " reading" : "",
45819 + reqmode & GR_WRITE ? " writing" : reqmode &
45820 + GR_APPEND ? " appending" : "");
45821 + return reqmode;
45822 + } else
45823 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45824 + {
45825 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45826 + reqmode & GR_READ ? " reading" : "",
45827 + reqmode & GR_WRITE ? " writing" : reqmode &
45828 + GR_APPEND ? " appending" : "");
45829 + return 0;
45830 + } else if (unlikely((mode & reqmode) != reqmode))
45831 + return 0;
45832 +
45833 + return reqmode;
45834 +}
45835 +
45836 +__u32
45837 +gr_acl_handle_creat(const struct dentry * dentry,
45838 + const struct dentry * p_dentry,
45839 + const struct vfsmount * p_mnt, const int fmode,
45840 + const int imode)
45841 +{
45842 + __u32 reqmode = GR_WRITE | GR_CREATE;
45843 + __u32 mode;
45844 +
45845 + if (unlikely(fmode & O_APPEND))
45846 + reqmode |= GR_APPEND;
45847 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45848 + reqmode |= GR_READ;
45849 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45850 + reqmode |= GR_SETID;
45851 +
45852 + mode =
45853 + gr_check_create(dentry, p_dentry, p_mnt,
45854 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45855 +
45856 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45857 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45858 + reqmode & GR_READ ? " reading" : "",
45859 + reqmode & GR_WRITE ? " writing" : reqmode &
45860 + GR_APPEND ? " appending" : "");
45861 + return reqmode;
45862 + } else
45863 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45864 + {
45865 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45866 + reqmode & GR_READ ? " reading" : "",
45867 + reqmode & GR_WRITE ? " writing" : reqmode &
45868 + GR_APPEND ? " appending" : "");
45869 + return 0;
45870 + } else if (unlikely((mode & reqmode) != reqmode))
45871 + return 0;
45872 +
45873 + return reqmode;
45874 +}
45875 +
45876 +__u32
45877 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45878 + const int fmode)
45879 +{
45880 + __u32 mode, reqmode = GR_FIND;
45881 +
45882 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45883 + reqmode |= GR_EXEC;
45884 + if (fmode & S_IWOTH)
45885 + reqmode |= GR_WRITE;
45886 + if (fmode & S_IROTH)
45887 + reqmode |= GR_READ;
45888 +
45889 + mode =
45890 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45891 + mnt);
45892 +
45893 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45894 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45895 + reqmode & GR_READ ? " reading" : "",
45896 + reqmode & GR_WRITE ? " writing" : "",
45897 + reqmode & GR_EXEC ? " executing" : "");
45898 + return reqmode;
45899 + } else
45900 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45901 + {
45902 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45903 + reqmode & GR_READ ? " reading" : "",
45904 + reqmode & GR_WRITE ? " writing" : "",
45905 + reqmode & GR_EXEC ? " executing" : "");
45906 + return 0;
45907 + } else if (unlikely((mode & reqmode) != reqmode))
45908 + return 0;
45909 +
45910 + return reqmode;
45911 +}
45912 +
45913 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45914 +{
45915 + __u32 mode;
45916 +
45917 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45918 +
45919 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45920 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45921 + return mode;
45922 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45923 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45924 + return 0;
45925 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45926 + return 0;
45927 +
45928 + return (reqmode);
45929 +}
45930 +
45931 +__u32
45932 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45933 +{
45934 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45935 +}
45936 +
45937 +__u32
45938 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45939 +{
45940 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45941 +}
45942 +
45943 +__u32
45944 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45945 +{
45946 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45947 +}
45948 +
45949 +__u32
45950 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45951 +{
45952 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45953 +}
45954 +
45955 +__u32
45956 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45957 + mode_t mode)
45958 +{
45959 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45960 + return 1;
45961 +
45962 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45963 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45964 + GR_FCHMOD_ACL_MSG);
45965 + } else {
45966 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45967 + }
45968 +}
45969 +
45970 +__u32
45971 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45972 + mode_t mode)
45973 +{
45974 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45975 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45976 + GR_CHMOD_ACL_MSG);
45977 + } else {
45978 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45979 + }
45980 +}
45981 +
45982 +__u32
45983 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45984 +{
45985 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45986 +}
45987 +
45988 +__u32
45989 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45990 +{
45991 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45992 +}
45993 +
45994 +__u32
45995 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45996 +{
45997 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45998 +}
45999 +
46000 +__u32
46001 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
46002 +{
46003 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
46004 + GR_UNIXCONNECT_ACL_MSG);
46005 +}
46006 +
46007 +/* hardlinks require at minimum create permission,
46008 + any additional privilege required is based on the
46009 + privilege of the file being linked to
46010 +*/
46011 +__u32
46012 +gr_acl_handle_link(const struct dentry * new_dentry,
46013 + const struct dentry * parent_dentry,
46014 + const struct vfsmount * parent_mnt,
46015 + const struct dentry * old_dentry,
46016 + const struct vfsmount * old_mnt, const char *to)
46017 +{
46018 + __u32 mode;
46019 + __u32 needmode = GR_CREATE | GR_LINK;
46020 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
46021 +
46022 + mode =
46023 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
46024 + old_mnt);
46025 +
46026 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
46027 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46028 + return mode;
46029 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46030 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46031 + return 0;
46032 + } else if (unlikely((mode & needmode) != needmode))
46033 + return 0;
46034 +
46035 + return 1;
46036 +}
46037 +
46038 +__u32
46039 +gr_acl_handle_symlink(const struct dentry * new_dentry,
46040 + const struct dentry * parent_dentry,
46041 + const struct vfsmount * parent_mnt, const char *from)
46042 +{
46043 + __u32 needmode = GR_WRITE | GR_CREATE;
46044 + __u32 mode;
46045 +
46046 + mode =
46047 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
46048 + GR_CREATE | GR_AUDIT_CREATE |
46049 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
46050 +
46051 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
46052 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46053 + return mode;
46054 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46055 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46056 + return 0;
46057 + } else if (unlikely((mode & needmode) != needmode))
46058 + return 0;
46059 +
46060 + return (GR_WRITE | GR_CREATE);
46061 +}
46062 +
46063 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
46064 +{
46065 + __u32 mode;
46066 +
46067 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
46068 +
46069 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
46070 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
46071 + return mode;
46072 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
46073 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
46074 + return 0;
46075 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
46076 + return 0;
46077 +
46078 + return (reqmode);
46079 +}
46080 +
46081 +__u32
46082 +gr_acl_handle_mknod(const struct dentry * new_dentry,
46083 + const struct dentry * parent_dentry,
46084 + const struct vfsmount * parent_mnt,
46085 + const int mode)
46086 +{
46087 + __u32 reqmode = GR_WRITE | GR_CREATE;
46088 + if (unlikely(mode & (S_ISUID | S_ISGID)))
46089 + reqmode |= GR_SETID;
46090 +
46091 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46092 + reqmode, GR_MKNOD_ACL_MSG);
46093 +}
46094 +
46095 +__u32
46096 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
46097 + const struct dentry *parent_dentry,
46098 + const struct vfsmount *parent_mnt)
46099 +{
46100 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46101 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
46102 +}
46103 +
46104 +#define RENAME_CHECK_SUCCESS(old, new) \
46105 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
46106 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
46107 +
46108 +int
46109 +gr_acl_handle_rename(struct dentry *new_dentry,
46110 + struct dentry *parent_dentry,
46111 + const struct vfsmount *parent_mnt,
46112 + struct dentry *old_dentry,
46113 + struct inode *old_parent_inode,
46114 + struct vfsmount *old_mnt, const char *newname)
46115 +{
46116 + __u32 comp1, comp2;
46117 + int error = 0;
46118 +
46119 + if (unlikely(!gr_acl_is_enabled()))
46120 + return 0;
46121 +
46122 + if (!new_dentry->d_inode) {
46123 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46124 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46125 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46126 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46127 + GR_DELETE | GR_AUDIT_DELETE |
46128 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46129 + GR_SUPPRESS, old_mnt);
46130 + } else {
46131 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46132 + GR_CREATE | GR_DELETE |
46133 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46134 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46135 + GR_SUPPRESS, parent_mnt);
46136 + comp2 =
46137 + gr_search_file(old_dentry,
46138 + GR_READ | GR_WRITE | GR_AUDIT_READ |
46139 + GR_DELETE | GR_AUDIT_DELETE |
46140 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46141 + }
46142 +
46143 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46144 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46145 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46146 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46147 + && !(comp2 & GR_SUPPRESS)) {
46148 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46149 + error = -EACCES;
46150 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46151 + error = -EACCES;
46152 +
46153 + return error;
46154 +}
46155 +
46156 +void
46157 +gr_acl_handle_exit(void)
46158 +{
46159 + u16 id;
46160 + char *rolename;
46161 + struct file *exec_file;
46162 +
46163 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46164 + !(current->role->roletype & GR_ROLE_PERSIST))) {
46165 + id = current->acl_role_id;
46166 + rolename = current->role->rolename;
46167 + gr_set_acls(1);
46168 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46169 + }
46170 +
46171 + write_lock(&grsec_exec_file_lock);
46172 + exec_file = current->exec_file;
46173 + current->exec_file = NULL;
46174 + write_unlock(&grsec_exec_file_lock);
46175 +
46176 + if (exec_file)
46177 + fput(exec_file);
46178 +}
46179 +
46180 +int
46181 +gr_acl_handle_procpidmem(const struct task_struct *task)
46182 +{
46183 + if (unlikely(!gr_acl_is_enabled()))
46184 + return 0;
46185 +
46186 + if (task != current && task->acl->mode & GR_PROTPROCFD)
46187 + return -EACCES;
46188 +
46189 + return 0;
46190 +}
46191 diff -urNp linux-2.6.39.4/grsecurity/gracl_ip.c linux-2.6.39.4/grsecurity/gracl_ip.c
46192 --- linux-2.6.39.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46193 +++ linux-2.6.39.4/grsecurity/gracl_ip.c 2011-08-05 19:44:37.000000000 -0400
46194 @@ -0,0 +1,381 @@
46195 +#include <linux/kernel.h>
46196 +#include <asm/uaccess.h>
46197 +#include <asm/errno.h>
46198 +#include <net/sock.h>
46199 +#include <linux/file.h>
46200 +#include <linux/fs.h>
46201 +#include <linux/net.h>
46202 +#include <linux/in.h>
46203 +#include <linux/skbuff.h>
46204 +#include <linux/ip.h>
46205 +#include <linux/udp.h>
46206 +#include <linux/types.h>
46207 +#include <linux/sched.h>
46208 +#include <linux/netdevice.h>
46209 +#include <linux/inetdevice.h>
46210 +#include <linux/gracl.h>
46211 +#include <linux/grsecurity.h>
46212 +#include <linux/grinternal.h>
46213 +
46214 +#define GR_BIND 0x01
46215 +#define GR_CONNECT 0x02
46216 +#define GR_INVERT 0x04
46217 +#define GR_BINDOVERRIDE 0x08
46218 +#define GR_CONNECTOVERRIDE 0x10
46219 +#define GR_SOCK_FAMILY 0x20
46220 +
46221 +static const char * gr_protocols[IPPROTO_MAX] = {
46222 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46223 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46224 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46225 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46226 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46227 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46228 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46229 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46230 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46231 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46232 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46233 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46234 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46235 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46236 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46237 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46238 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46239 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46240 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46241 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46242 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46243 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46244 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46245 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46246 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46247 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46248 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46249 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46250 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46251 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46252 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46253 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46254 + };
46255 +
46256 +static const char * gr_socktypes[SOCK_MAX] = {
46257 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46258 + "unknown:7", "unknown:8", "unknown:9", "packet"
46259 + };
46260 +
46261 +static const char * gr_sockfamilies[AF_MAX+1] = {
46262 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46263 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46264 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46265 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46266 + };
46267 +
46268 +const char *
46269 +gr_proto_to_name(unsigned char proto)
46270 +{
46271 + return gr_protocols[proto];
46272 +}
46273 +
46274 +const char *
46275 +gr_socktype_to_name(unsigned char type)
46276 +{
46277 + return gr_socktypes[type];
46278 +}
46279 +
46280 +const char *
46281 +gr_sockfamily_to_name(unsigned char family)
46282 +{
46283 + return gr_sockfamilies[family];
46284 +}
46285 +
46286 +int
46287 +gr_search_socket(const int domain, const int type, const int protocol)
46288 +{
46289 + struct acl_subject_label *curr;
46290 + const struct cred *cred = current_cred();
46291 +
46292 + if (unlikely(!gr_acl_is_enabled()))
46293 + goto exit;
46294 +
46295 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
46296 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46297 + goto exit; // let the kernel handle it
46298 +
46299 + curr = current->acl;
46300 +
46301 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46302 + /* the family is allowed, if this is PF_INET allow it only if
46303 + the extra sock type/protocol checks pass */
46304 + if (domain == PF_INET)
46305 + goto inet_check;
46306 + goto exit;
46307 + } else {
46308 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46309 + __u32 fakeip = 0;
46310 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46311 + current->role->roletype, cred->uid,
46312 + cred->gid, current->exec_file ?
46313 + gr_to_filename(current->exec_file->f_path.dentry,
46314 + current->exec_file->f_path.mnt) :
46315 + curr->filename, curr->filename,
46316 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46317 + &current->signal->saved_ip);
46318 + goto exit;
46319 + }
46320 + goto exit_fail;
46321 + }
46322 +
46323 +inet_check:
46324 + /* the rest of this checking is for IPv4 only */
46325 + if (!curr->ips)
46326 + goto exit;
46327 +
46328 + if ((curr->ip_type & (1 << type)) &&
46329 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46330 + goto exit;
46331 +
46332 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46333 + /* we don't place acls on raw sockets , and sometimes
46334 + dgram/ip sockets are opened for ioctl and not
46335 + bind/connect, so we'll fake a bind learn log */
46336 + if (type == SOCK_RAW || type == SOCK_PACKET) {
46337 + __u32 fakeip = 0;
46338 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46339 + current->role->roletype, cred->uid,
46340 + cred->gid, current->exec_file ?
46341 + gr_to_filename(current->exec_file->f_path.dentry,
46342 + current->exec_file->f_path.mnt) :
46343 + curr->filename, curr->filename,
46344 + &fakeip, 0, type,
46345 + protocol, GR_CONNECT, &current->signal->saved_ip);
46346 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46347 + __u32 fakeip = 0;
46348 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46349 + current->role->roletype, cred->uid,
46350 + cred->gid, current->exec_file ?
46351 + gr_to_filename(current->exec_file->f_path.dentry,
46352 + current->exec_file->f_path.mnt) :
46353 + curr->filename, curr->filename,
46354 + &fakeip, 0, type,
46355 + protocol, GR_BIND, &current->signal->saved_ip);
46356 + }
46357 + /* we'll log when they use connect or bind */
46358 + goto exit;
46359 + }
46360 +
46361 +exit_fail:
46362 + if (domain == PF_INET)
46363 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46364 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
46365 + else
46366 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46367 + gr_socktype_to_name(type), protocol);
46368 +
46369 + return 0;
46370 +exit:
46371 + return 1;
46372 +}
46373 +
46374 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46375 +{
46376 + if ((ip->mode & mode) &&
46377 + (ip_port >= ip->low) &&
46378 + (ip_port <= ip->high) &&
46379 + ((ntohl(ip_addr) & our_netmask) ==
46380 + (ntohl(our_addr) & our_netmask))
46381 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46382 + && (ip->type & (1 << type))) {
46383 + if (ip->mode & GR_INVERT)
46384 + return 2; // specifically denied
46385 + else
46386 + return 1; // allowed
46387 + }
46388 +
46389 + return 0; // not specifically allowed, may continue parsing
46390 +}
46391 +
46392 +static int
46393 +gr_search_connectbind(const int full_mode, struct sock *sk,
46394 + struct sockaddr_in *addr, const int type)
46395 +{
46396 + char iface[IFNAMSIZ] = {0};
46397 + struct acl_subject_label *curr;
46398 + struct acl_ip_label *ip;
46399 + struct inet_sock *isk;
46400 + struct net_device *dev;
46401 + struct in_device *idev;
46402 + unsigned long i;
46403 + int ret;
46404 + int mode = full_mode & (GR_BIND | GR_CONNECT);
46405 + __u32 ip_addr = 0;
46406 + __u32 our_addr;
46407 + __u32 our_netmask;
46408 + char *p;
46409 + __u16 ip_port = 0;
46410 + const struct cred *cred = current_cred();
46411 +
46412 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46413 + return 0;
46414 +
46415 + curr = current->acl;
46416 + isk = inet_sk(sk);
46417 +
46418 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46419 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46420 + addr->sin_addr.s_addr = curr->inaddr_any_override;
46421 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46422 + struct sockaddr_in saddr;
46423 + int err;
46424 +
46425 + saddr.sin_family = AF_INET;
46426 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46427 + saddr.sin_port = isk->inet_sport;
46428 +
46429 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46430 + if (err)
46431 + return err;
46432 +
46433 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46434 + if (err)
46435 + return err;
46436 + }
46437 +
46438 + if (!curr->ips)
46439 + return 0;
46440 +
46441 + ip_addr = addr->sin_addr.s_addr;
46442 + ip_port = ntohs(addr->sin_port);
46443 +
46444 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46445 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46446 + current->role->roletype, cred->uid,
46447 + cred->gid, current->exec_file ?
46448 + gr_to_filename(current->exec_file->f_path.dentry,
46449 + current->exec_file->f_path.mnt) :
46450 + curr->filename, curr->filename,
46451 + &ip_addr, ip_port, type,
46452 + sk->sk_protocol, mode, &current->signal->saved_ip);
46453 + return 0;
46454 + }
46455 +
46456 + for (i = 0; i < curr->ip_num; i++) {
46457 + ip = *(curr->ips + i);
46458 + if (ip->iface != NULL) {
46459 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46460 + p = strchr(iface, ':');
46461 + if (p != NULL)
46462 + *p = '\0';
46463 + dev = dev_get_by_name(sock_net(sk), iface);
46464 + if (dev == NULL)
46465 + continue;
46466 + idev = in_dev_get(dev);
46467 + if (idev == NULL) {
46468 + dev_put(dev);
46469 + continue;
46470 + }
46471 + rcu_read_lock();
46472 + for_ifa(idev) {
46473 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46474 + our_addr = ifa->ifa_address;
46475 + our_netmask = 0xffffffff;
46476 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46477 + if (ret == 1) {
46478 + rcu_read_unlock();
46479 + in_dev_put(idev);
46480 + dev_put(dev);
46481 + return 0;
46482 + } else if (ret == 2) {
46483 + rcu_read_unlock();
46484 + in_dev_put(idev);
46485 + dev_put(dev);
46486 + goto denied;
46487 + }
46488 + }
46489 + } endfor_ifa(idev);
46490 + rcu_read_unlock();
46491 + in_dev_put(idev);
46492 + dev_put(dev);
46493 + } else {
46494 + our_addr = ip->addr;
46495 + our_netmask = ip->netmask;
46496 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46497 + if (ret == 1)
46498 + return 0;
46499 + else if (ret == 2)
46500 + goto denied;
46501 + }
46502 + }
46503 +
46504 +denied:
46505 + if (mode == GR_BIND)
46506 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46507 + else if (mode == GR_CONNECT)
46508 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46509 +
46510 + return -EACCES;
46511 +}
46512 +
46513 +int
46514 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46515 +{
46516 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46517 +}
46518 +
46519 +int
46520 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46521 +{
46522 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46523 +}
46524 +
46525 +int gr_search_listen(struct socket *sock)
46526 +{
46527 + struct sock *sk = sock->sk;
46528 + struct sockaddr_in addr;
46529 +
46530 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46531 + addr.sin_port = inet_sk(sk)->inet_sport;
46532 +
46533 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46534 +}
46535 +
46536 +int gr_search_accept(struct socket *sock)
46537 +{
46538 + struct sock *sk = sock->sk;
46539 + struct sockaddr_in addr;
46540 +
46541 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46542 + addr.sin_port = inet_sk(sk)->inet_sport;
46543 +
46544 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46545 +}
46546 +
46547 +int
46548 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46549 +{
46550 + if (addr)
46551 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46552 + else {
46553 + struct sockaddr_in sin;
46554 + const struct inet_sock *inet = inet_sk(sk);
46555 +
46556 + sin.sin_addr.s_addr = inet->inet_daddr;
46557 + sin.sin_port = inet->inet_dport;
46558 +
46559 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46560 + }
46561 +}
46562 +
46563 +int
46564 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46565 +{
46566 + struct sockaddr_in sin;
46567 +
46568 + if (unlikely(skb->len < sizeof (struct udphdr)))
46569 + return 0; // skip this packet
46570 +
46571 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46572 + sin.sin_port = udp_hdr(skb)->source;
46573 +
46574 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46575 +}
46576 diff -urNp linux-2.6.39.4/grsecurity/gracl_learn.c linux-2.6.39.4/grsecurity/gracl_learn.c
46577 --- linux-2.6.39.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46578 +++ linux-2.6.39.4/grsecurity/gracl_learn.c 2011-08-05 19:44:37.000000000 -0400
46579 @@ -0,0 +1,207 @@
46580 +#include <linux/kernel.h>
46581 +#include <linux/mm.h>
46582 +#include <linux/sched.h>
46583 +#include <linux/poll.h>
46584 +#include <linux/string.h>
46585 +#include <linux/file.h>
46586 +#include <linux/types.h>
46587 +#include <linux/vmalloc.h>
46588 +#include <linux/grinternal.h>
46589 +
46590 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46591 + size_t count, loff_t *ppos);
46592 +extern int gr_acl_is_enabled(void);
46593 +
46594 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46595 +static int gr_learn_attached;
46596 +
46597 +/* use a 512k buffer */
46598 +#define LEARN_BUFFER_SIZE (512 * 1024)
46599 +
46600 +static DEFINE_SPINLOCK(gr_learn_lock);
46601 +static DEFINE_MUTEX(gr_learn_user_mutex);
46602 +
46603 +/* we need to maintain two buffers, so that the kernel context of grlearn
46604 + uses a semaphore around the userspace copying, and the other kernel contexts
46605 + use a spinlock when copying into the buffer, since they cannot sleep
46606 +*/
46607 +static char *learn_buffer;
46608 +static char *learn_buffer_user;
46609 +static int learn_buffer_len;
46610 +static int learn_buffer_user_len;
46611 +
46612 +static ssize_t
46613 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46614 +{
46615 + DECLARE_WAITQUEUE(wait, current);
46616 + ssize_t retval = 0;
46617 +
46618 + add_wait_queue(&learn_wait, &wait);
46619 + set_current_state(TASK_INTERRUPTIBLE);
46620 + do {
46621 + mutex_lock(&gr_learn_user_mutex);
46622 + spin_lock(&gr_learn_lock);
46623 + if (learn_buffer_len)
46624 + break;
46625 + spin_unlock(&gr_learn_lock);
46626 + mutex_unlock(&gr_learn_user_mutex);
46627 + if (file->f_flags & O_NONBLOCK) {
46628 + retval = -EAGAIN;
46629 + goto out;
46630 + }
46631 + if (signal_pending(current)) {
46632 + retval = -ERESTARTSYS;
46633 + goto out;
46634 + }
46635 +
46636 + schedule();
46637 + } while (1);
46638 +
46639 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46640 + learn_buffer_user_len = learn_buffer_len;
46641 + retval = learn_buffer_len;
46642 + learn_buffer_len = 0;
46643 +
46644 + spin_unlock(&gr_learn_lock);
46645 +
46646 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46647 + retval = -EFAULT;
46648 +
46649 + mutex_unlock(&gr_learn_user_mutex);
46650 +out:
46651 + set_current_state(TASK_RUNNING);
46652 + remove_wait_queue(&learn_wait, &wait);
46653 + return retval;
46654 +}
46655 +
46656 +static unsigned int
46657 +poll_learn(struct file * file, poll_table * wait)
46658 +{
46659 + poll_wait(file, &learn_wait, wait);
46660 +
46661 + if (learn_buffer_len)
46662 + return (POLLIN | POLLRDNORM);
46663 +
46664 + return 0;
46665 +}
46666 +
46667 +void
46668 +gr_clear_learn_entries(void)
46669 +{
46670 + char *tmp;
46671 +
46672 + mutex_lock(&gr_learn_user_mutex);
46673 + spin_lock(&gr_learn_lock);
46674 + tmp = learn_buffer;
46675 + learn_buffer = NULL;
46676 + spin_unlock(&gr_learn_lock);
46677 + if (tmp)
46678 + vfree(tmp);
46679 + if (learn_buffer_user != NULL) {
46680 + vfree(learn_buffer_user);
46681 + learn_buffer_user = NULL;
46682 + }
46683 + learn_buffer_len = 0;
46684 + mutex_unlock(&gr_learn_user_mutex);
46685 +
46686 + return;
46687 +}
46688 +
46689 +void
46690 +gr_add_learn_entry(const char *fmt, ...)
46691 +{
46692 + va_list args;
46693 + unsigned int len;
46694 +
46695 + if (!gr_learn_attached)
46696 + return;
46697 +
46698 + spin_lock(&gr_learn_lock);
46699 +
46700 + /* leave a gap at the end so we know when it's "full" but don't have to
46701 + compute the exact length of the string we're trying to append
46702 + */
46703 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46704 + spin_unlock(&gr_learn_lock);
46705 + wake_up_interruptible(&learn_wait);
46706 + return;
46707 + }
46708 + if (learn_buffer == NULL) {
46709 + spin_unlock(&gr_learn_lock);
46710 + return;
46711 + }
46712 +
46713 + va_start(args, fmt);
46714 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46715 + va_end(args);
46716 +
46717 + learn_buffer_len += len + 1;
46718 +
46719 + spin_unlock(&gr_learn_lock);
46720 + wake_up_interruptible(&learn_wait);
46721 +
46722 + return;
46723 +}
46724 +
46725 +static int
46726 +open_learn(struct inode *inode, struct file *file)
46727 +{
46728 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46729 + return -EBUSY;
46730 + if (file->f_mode & FMODE_READ) {
46731 + int retval = 0;
46732 + mutex_lock(&gr_learn_user_mutex);
46733 + if (learn_buffer == NULL)
46734 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46735 + if (learn_buffer_user == NULL)
46736 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46737 + if (learn_buffer == NULL) {
46738 + retval = -ENOMEM;
46739 + goto out_error;
46740 + }
46741 + if (learn_buffer_user == NULL) {
46742 + retval = -ENOMEM;
46743 + goto out_error;
46744 + }
46745 + learn_buffer_len = 0;
46746 + learn_buffer_user_len = 0;
46747 + gr_learn_attached = 1;
46748 +out_error:
46749 + mutex_unlock(&gr_learn_user_mutex);
46750 + return retval;
46751 + }
46752 + return 0;
46753 +}
46754 +
46755 +static int
46756 +close_learn(struct inode *inode, struct file *file)
46757 +{
46758 + if (file->f_mode & FMODE_READ) {
46759 + char *tmp = NULL;
46760 + mutex_lock(&gr_learn_user_mutex);
46761 + spin_lock(&gr_learn_lock);
46762 + tmp = learn_buffer;
46763 + learn_buffer = NULL;
46764 + spin_unlock(&gr_learn_lock);
46765 + if (tmp)
46766 + vfree(tmp);
46767 + if (learn_buffer_user != NULL) {
46768 + vfree(learn_buffer_user);
46769 + learn_buffer_user = NULL;
46770 + }
46771 + learn_buffer_len = 0;
46772 + learn_buffer_user_len = 0;
46773 + gr_learn_attached = 0;
46774 + mutex_unlock(&gr_learn_user_mutex);
46775 + }
46776 +
46777 + return 0;
46778 +}
46779 +
46780 +const struct file_operations grsec_fops = {
46781 + .read = read_learn,
46782 + .write = write_grsec_handler,
46783 + .open = open_learn,
46784 + .release = close_learn,
46785 + .poll = poll_learn,
46786 +};
46787 diff -urNp linux-2.6.39.4/grsecurity/gracl_res.c linux-2.6.39.4/grsecurity/gracl_res.c
46788 --- linux-2.6.39.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46789 +++ linux-2.6.39.4/grsecurity/gracl_res.c 2011-08-05 19:44:37.000000000 -0400
46790 @@ -0,0 +1,68 @@
46791 +#include <linux/kernel.h>
46792 +#include <linux/sched.h>
46793 +#include <linux/gracl.h>
46794 +#include <linux/grinternal.h>
46795 +
46796 +static const char *restab_log[] = {
46797 + [RLIMIT_CPU] = "RLIMIT_CPU",
46798 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46799 + [RLIMIT_DATA] = "RLIMIT_DATA",
46800 + [RLIMIT_STACK] = "RLIMIT_STACK",
46801 + [RLIMIT_CORE] = "RLIMIT_CORE",
46802 + [RLIMIT_RSS] = "RLIMIT_RSS",
46803 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46804 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46805 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46806 + [RLIMIT_AS] = "RLIMIT_AS",
46807 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46808 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46809 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46810 + [RLIMIT_NICE] = "RLIMIT_NICE",
46811 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46812 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46813 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46814 +};
46815 +
46816 +void
46817 +gr_log_resource(const struct task_struct *task,
46818 + const int res, const unsigned long wanted, const int gt)
46819 +{
46820 + const struct cred *cred;
46821 + unsigned long rlim;
46822 +
46823 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46824 + return;
46825 +
46826 + // not yet supported resource
46827 + if (unlikely(!restab_log[res]))
46828 + return;
46829 +
46830 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46831 + rlim = task_rlimit_max(task, res);
46832 + else
46833 + rlim = task_rlimit(task, res);
46834 +
46835 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46836 + return;
46837 +
46838 + rcu_read_lock();
46839 + cred = __task_cred(task);
46840 +
46841 + if (res == RLIMIT_NPROC &&
46842 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46843 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46844 + goto out_rcu_unlock;
46845 + else if (res == RLIMIT_MEMLOCK &&
46846 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46847 + goto out_rcu_unlock;
46848 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46849 + goto out_rcu_unlock;
46850 + rcu_read_unlock();
46851 +
46852 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46853 +
46854 + return;
46855 +out_rcu_unlock:
46856 + rcu_read_unlock();
46857 + return;
46858 +}
46859 diff -urNp linux-2.6.39.4/grsecurity/gracl_segv.c linux-2.6.39.4/grsecurity/gracl_segv.c
46860 --- linux-2.6.39.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46861 +++ linux-2.6.39.4/grsecurity/gracl_segv.c 2011-08-05 19:44:37.000000000 -0400
46862 @@ -0,0 +1,299 @@
46863 +#include <linux/kernel.h>
46864 +#include <linux/mm.h>
46865 +#include <asm/uaccess.h>
46866 +#include <asm/errno.h>
46867 +#include <asm/mman.h>
46868 +#include <net/sock.h>
46869 +#include <linux/file.h>
46870 +#include <linux/fs.h>
46871 +#include <linux/net.h>
46872 +#include <linux/in.h>
46873 +#include <linux/slab.h>
46874 +#include <linux/types.h>
46875 +#include <linux/sched.h>
46876 +#include <linux/timer.h>
46877 +#include <linux/gracl.h>
46878 +#include <linux/grsecurity.h>
46879 +#include <linux/grinternal.h>
46880 +
46881 +static struct crash_uid *uid_set;
46882 +static unsigned short uid_used;
46883 +static DEFINE_SPINLOCK(gr_uid_lock);
46884 +extern rwlock_t gr_inode_lock;
46885 +extern struct acl_subject_label *
46886 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46887 + struct acl_role_label *role);
46888 +
46889 +#ifdef CONFIG_BTRFS_FS
46890 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46891 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46892 +#endif
46893 +
46894 +static inline dev_t __get_dev(const struct dentry *dentry)
46895 +{
46896 +#ifdef CONFIG_BTRFS_FS
46897 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46898 + return get_btrfs_dev_from_inode(dentry->d_inode);
46899 + else
46900 +#endif
46901 + return dentry->d_inode->i_sb->s_dev;
46902 +}
46903 +
46904 +int
46905 +gr_init_uidset(void)
46906 +{
46907 + uid_set =
46908 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46909 + uid_used = 0;
46910 +
46911 + return uid_set ? 1 : 0;
46912 +}
46913 +
46914 +void
46915 +gr_free_uidset(void)
46916 +{
46917 + if (uid_set)
46918 + kfree(uid_set);
46919 +
46920 + return;
46921 +}
46922 +
46923 +int
46924 +gr_find_uid(const uid_t uid)
46925 +{
46926 + struct crash_uid *tmp = uid_set;
46927 + uid_t buid;
46928 + int low = 0, high = uid_used - 1, mid;
46929 +
46930 + while (high >= low) {
46931 + mid = (low + high) >> 1;
46932 + buid = tmp[mid].uid;
46933 + if (buid == uid)
46934 + return mid;
46935 + if (buid > uid)
46936 + high = mid - 1;
46937 + if (buid < uid)
46938 + low = mid + 1;
46939 + }
46940 +
46941 + return -1;
46942 +}
46943 +
46944 +static __inline__ void
46945 +gr_insertsort(void)
46946 +{
46947 + unsigned short i, j;
46948 + struct crash_uid index;
46949 +
46950 + for (i = 1; i < uid_used; i++) {
46951 + index = uid_set[i];
46952 + j = i;
46953 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46954 + uid_set[j] = uid_set[j - 1];
46955 + j--;
46956 + }
46957 + uid_set[j] = index;
46958 + }
46959 +
46960 + return;
46961 +}
46962 +
46963 +static __inline__ void
46964 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46965 +{
46966 + int loc;
46967 +
46968 + if (uid_used == GR_UIDTABLE_MAX)
46969 + return;
46970 +
46971 + loc = gr_find_uid(uid);
46972 +
46973 + if (loc >= 0) {
46974 + uid_set[loc].expires = expires;
46975 + return;
46976 + }
46977 +
46978 + uid_set[uid_used].uid = uid;
46979 + uid_set[uid_used].expires = expires;
46980 + uid_used++;
46981 +
46982 + gr_insertsort();
46983 +
46984 + return;
46985 +}
46986 +
46987 +void
46988 +gr_remove_uid(const unsigned short loc)
46989 +{
46990 + unsigned short i;
46991 +
46992 + for (i = loc + 1; i < uid_used; i++)
46993 + uid_set[i - 1] = uid_set[i];
46994 +
46995 + uid_used--;
46996 +
46997 + return;
46998 +}
46999 +
47000 +int
47001 +gr_check_crash_uid(const uid_t uid)
47002 +{
47003 + int loc;
47004 + int ret = 0;
47005 +
47006 + if (unlikely(!gr_acl_is_enabled()))
47007 + return 0;
47008 +
47009 + spin_lock(&gr_uid_lock);
47010 + loc = gr_find_uid(uid);
47011 +
47012 + if (loc < 0)
47013 + goto out_unlock;
47014 +
47015 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
47016 + gr_remove_uid(loc);
47017 + else
47018 + ret = 1;
47019 +
47020 +out_unlock:
47021 + spin_unlock(&gr_uid_lock);
47022 + return ret;
47023 +}
47024 +
47025 +static __inline__ int
47026 +proc_is_setxid(const struct cred *cred)
47027 +{
47028 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
47029 + cred->uid != cred->fsuid)
47030 + return 1;
47031 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
47032 + cred->gid != cred->fsgid)
47033 + return 1;
47034 +
47035 + return 0;
47036 +}
47037 +
47038 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
47039 +
47040 +void
47041 +gr_handle_crash(struct task_struct *task, const int sig)
47042 +{
47043 + struct acl_subject_label *curr;
47044 + struct acl_subject_label *curr2;
47045 + struct task_struct *tsk, *tsk2;
47046 + const struct cred *cred;
47047 + const struct cred *cred2;
47048 +
47049 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
47050 + return;
47051 +
47052 + if (unlikely(!gr_acl_is_enabled()))
47053 + return;
47054 +
47055 + curr = task->acl;
47056 +
47057 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
47058 + return;
47059 +
47060 + if (time_before_eq(curr->expires, get_seconds())) {
47061 + curr->expires = 0;
47062 + curr->crashes = 0;
47063 + }
47064 +
47065 + curr->crashes++;
47066 +
47067 + if (!curr->expires)
47068 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
47069 +
47070 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47071 + time_after(curr->expires, get_seconds())) {
47072 + rcu_read_lock();
47073 + cred = __task_cred(task);
47074 + if (cred->uid && proc_is_setxid(cred)) {
47075 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47076 + spin_lock(&gr_uid_lock);
47077 + gr_insert_uid(cred->uid, curr->expires);
47078 + spin_unlock(&gr_uid_lock);
47079 + curr->expires = 0;
47080 + curr->crashes = 0;
47081 + read_lock(&tasklist_lock);
47082 + do_each_thread(tsk2, tsk) {
47083 + cred2 = __task_cred(tsk);
47084 + if (tsk != task && cred2->uid == cred->uid)
47085 + gr_fake_force_sig(SIGKILL, tsk);
47086 + } while_each_thread(tsk2, tsk);
47087 + read_unlock(&tasklist_lock);
47088 + } else {
47089 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47090 + read_lock(&tasklist_lock);
47091 + do_each_thread(tsk2, tsk) {
47092 + if (likely(tsk != task)) {
47093 + curr2 = tsk->acl;
47094 +
47095 + if (curr2->device == curr->device &&
47096 + curr2->inode == curr->inode)
47097 + gr_fake_force_sig(SIGKILL, tsk);
47098 + }
47099 + } while_each_thread(tsk2, tsk);
47100 + read_unlock(&tasklist_lock);
47101 + }
47102 + rcu_read_unlock();
47103 + }
47104 +
47105 + return;
47106 +}
47107 +
47108 +int
47109 +gr_check_crash_exec(const struct file *filp)
47110 +{
47111 + struct acl_subject_label *curr;
47112 +
47113 + if (unlikely(!gr_acl_is_enabled()))
47114 + return 0;
47115 +
47116 + read_lock(&gr_inode_lock);
47117 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
47118 + __get_dev(filp->f_path.dentry),
47119 + current->role);
47120 + read_unlock(&gr_inode_lock);
47121 +
47122 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
47123 + (!curr->crashes && !curr->expires))
47124 + return 0;
47125 +
47126 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47127 + time_after(curr->expires, get_seconds()))
47128 + return 1;
47129 + else if (time_before_eq(curr->expires, get_seconds())) {
47130 + curr->crashes = 0;
47131 + curr->expires = 0;
47132 + }
47133 +
47134 + return 0;
47135 +}
47136 +
47137 +void
47138 +gr_handle_alertkill(struct task_struct *task)
47139 +{
47140 + struct acl_subject_label *curracl;
47141 + __u32 curr_ip;
47142 + struct task_struct *p, *p2;
47143 +
47144 + if (unlikely(!gr_acl_is_enabled()))
47145 + return;
47146 +
47147 + curracl = task->acl;
47148 + curr_ip = task->signal->curr_ip;
47149 +
47150 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47151 + read_lock(&tasklist_lock);
47152 + do_each_thread(p2, p) {
47153 + if (p->signal->curr_ip == curr_ip)
47154 + gr_fake_force_sig(SIGKILL, p);
47155 + } while_each_thread(p2, p);
47156 + read_unlock(&tasklist_lock);
47157 + } else if (curracl->mode & GR_KILLPROC)
47158 + gr_fake_force_sig(SIGKILL, task);
47159 +
47160 + return;
47161 +}
47162 diff -urNp linux-2.6.39.4/grsecurity/gracl_shm.c linux-2.6.39.4/grsecurity/gracl_shm.c
47163 --- linux-2.6.39.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47164 +++ linux-2.6.39.4/grsecurity/gracl_shm.c 2011-08-05 19:44:37.000000000 -0400
47165 @@ -0,0 +1,40 @@
47166 +#include <linux/kernel.h>
47167 +#include <linux/mm.h>
47168 +#include <linux/sched.h>
47169 +#include <linux/file.h>
47170 +#include <linux/ipc.h>
47171 +#include <linux/gracl.h>
47172 +#include <linux/grsecurity.h>
47173 +#include <linux/grinternal.h>
47174 +
47175 +int
47176 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47177 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47178 +{
47179 + struct task_struct *task;
47180 +
47181 + if (!gr_acl_is_enabled())
47182 + return 1;
47183 +
47184 + rcu_read_lock();
47185 + read_lock(&tasklist_lock);
47186 +
47187 + task = find_task_by_vpid(shm_cprid);
47188 +
47189 + if (unlikely(!task))
47190 + task = find_task_by_vpid(shm_lapid);
47191 +
47192 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47193 + (task->pid == shm_lapid)) &&
47194 + (task->acl->mode & GR_PROTSHM) &&
47195 + (task->acl != current->acl))) {
47196 + read_unlock(&tasklist_lock);
47197 + rcu_read_unlock();
47198 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47199 + return 0;
47200 + }
47201 + read_unlock(&tasklist_lock);
47202 + rcu_read_unlock();
47203 +
47204 + return 1;
47205 +}
47206 diff -urNp linux-2.6.39.4/grsecurity/grsec_chdir.c linux-2.6.39.4/grsecurity/grsec_chdir.c
47207 --- linux-2.6.39.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47208 +++ linux-2.6.39.4/grsecurity/grsec_chdir.c 2011-08-05 19:44:37.000000000 -0400
47209 @@ -0,0 +1,19 @@
47210 +#include <linux/kernel.h>
47211 +#include <linux/sched.h>
47212 +#include <linux/fs.h>
47213 +#include <linux/file.h>
47214 +#include <linux/grsecurity.h>
47215 +#include <linux/grinternal.h>
47216 +
47217 +void
47218 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47219 +{
47220 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47221 + if ((grsec_enable_chdir && grsec_enable_group &&
47222 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47223 + !grsec_enable_group)) {
47224 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47225 + }
47226 +#endif
47227 + return;
47228 +}
47229 diff -urNp linux-2.6.39.4/grsecurity/grsec_chroot.c linux-2.6.39.4/grsecurity/grsec_chroot.c
47230 --- linux-2.6.39.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47231 +++ linux-2.6.39.4/grsecurity/grsec_chroot.c 2011-08-05 19:44:37.000000000 -0400
47232 @@ -0,0 +1,349 @@
47233 +#include <linux/kernel.h>
47234 +#include <linux/module.h>
47235 +#include <linux/sched.h>
47236 +#include <linux/file.h>
47237 +#include <linux/fs.h>
47238 +#include <linux/mount.h>
47239 +#include <linux/types.h>
47240 +#include <linux/pid_namespace.h>
47241 +#include <linux/grsecurity.h>
47242 +#include <linux/grinternal.h>
47243 +
47244 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47245 +{
47246 +#ifdef CONFIG_GRKERNSEC
47247 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47248 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47249 + task->gr_is_chrooted = 1;
47250 + else
47251 + task->gr_is_chrooted = 0;
47252 +
47253 + task->gr_chroot_dentry = path->dentry;
47254 +#endif
47255 + return;
47256 +}
47257 +
47258 +void gr_clear_chroot_entries(struct task_struct *task)
47259 +{
47260 +#ifdef CONFIG_GRKERNSEC
47261 + task->gr_is_chrooted = 0;
47262 + task->gr_chroot_dentry = NULL;
47263 +#endif
47264 + return;
47265 +}
47266 +
47267 +int
47268 +gr_handle_chroot_unix(const pid_t pid)
47269 +{
47270 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47271 + struct task_struct *p;
47272 +
47273 + if (unlikely(!grsec_enable_chroot_unix))
47274 + return 1;
47275 +
47276 + if (likely(!proc_is_chrooted(current)))
47277 + return 1;
47278 +
47279 + rcu_read_lock();
47280 + read_lock(&tasklist_lock);
47281 + p = find_task_by_vpid_unrestricted(pid);
47282 + if (unlikely(p && !have_same_root(current, p))) {
47283 + read_unlock(&tasklist_lock);
47284 + rcu_read_unlock();
47285 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47286 + return 0;
47287 + }
47288 + read_unlock(&tasklist_lock);
47289 + rcu_read_unlock();
47290 +#endif
47291 + return 1;
47292 +}
47293 +
47294 +int
47295 +gr_handle_chroot_nice(void)
47296 +{
47297 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47298 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47299 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47300 + return -EPERM;
47301 + }
47302 +#endif
47303 + return 0;
47304 +}
47305 +
47306 +int
47307 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47308 +{
47309 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47310 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47311 + && proc_is_chrooted(current)) {
47312 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47313 + return -EACCES;
47314 + }
47315 +#endif
47316 + return 0;
47317 +}
47318 +
47319 +int
47320 +gr_handle_chroot_rawio(const struct inode *inode)
47321 +{
47322 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47323 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47324 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47325 + return 1;
47326 +#endif
47327 + return 0;
47328 +}
47329 +
47330 +int
47331 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47332 +{
47333 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47334 + struct task_struct *p;
47335 + int ret = 0;
47336 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47337 + return ret;
47338 +
47339 + read_lock(&tasklist_lock);
47340 + do_each_pid_task(pid, type, p) {
47341 + if (!have_same_root(current, p)) {
47342 + ret = 1;
47343 + goto out;
47344 + }
47345 + } while_each_pid_task(pid, type, p);
47346 +out:
47347 + read_unlock(&tasklist_lock);
47348 + return ret;
47349 +#endif
47350 + return 0;
47351 +}
47352 +
47353 +int
47354 +gr_pid_is_chrooted(struct task_struct *p)
47355 +{
47356 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47357 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47358 + return 0;
47359 +
47360 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47361 + !have_same_root(current, p)) {
47362 + return 1;
47363 + }
47364 +#endif
47365 + return 0;
47366 +}
47367 +
47368 +EXPORT_SYMBOL(gr_pid_is_chrooted);
47369 +
47370 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47371 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47372 +{
47373 + struct path path, currentroot;
47374 + int ret = 0;
47375 +
47376 + path.dentry = (struct dentry *)u_dentry;
47377 + path.mnt = (struct vfsmount *)u_mnt;
47378 + get_fs_root(current->fs, &currentroot);
47379 + if (path_is_under(&path, &currentroot))
47380 + ret = 1;
47381 + path_put(&currentroot);
47382 +
47383 + return ret;
47384 +}
47385 +#endif
47386 +
47387 +int
47388 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47389 +{
47390 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47391 + if (!grsec_enable_chroot_fchdir)
47392 + return 1;
47393 +
47394 + if (!proc_is_chrooted(current))
47395 + return 1;
47396 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47397 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47398 + return 0;
47399 + }
47400 +#endif
47401 + return 1;
47402 +}
47403 +
47404 +int
47405 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47406 + const time_t shm_createtime)
47407 +{
47408 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47409 + struct task_struct *p;
47410 + time_t starttime;
47411 +
47412 + if (unlikely(!grsec_enable_chroot_shmat))
47413 + return 1;
47414 +
47415 + if (likely(!proc_is_chrooted(current)))
47416 + return 1;
47417 +
47418 + rcu_read_lock();
47419 + read_lock(&tasklist_lock);
47420 +
47421 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47422 + starttime = p->start_time.tv_sec;
47423 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47424 + if (have_same_root(current, p)) {
47425 + goto allow;
47426 + } else {
47427 + read_unlock(&tasklist_lock);
47428 + rcu_read_unlock();
47429 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47430 + return 0;
47431 + }
47432 + }
47433 + /* creator exited, pid reuse, fall through to next check */
47434 + }
47435 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47436 + if (unlikely(!have_same_root(current, p))) {
47437 + read_unlock(&tasklist_lock);
47438 + rcu_read_unlock();
47439 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47440 + return 0;
47441 + }
47442 + }
47443 +
47444 +allow:
47445 + read_unlock(&tasklist_lock);
47446 + rcu_read_unlock();
47447 +#endif
47448 + return 1;
47449 +}
47450 +
47451 +void
47452 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47453 +{
47454 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47455 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47456 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47457 +#endif
47458 + return;
47459 +}
47460 +
47461 +int
47462 +gr_handle_chroot_mknod(const struct dentry *dentry,
47463 + const struct vfsmount *mnt, const int mode)
47464 +{
47465 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47466 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47467 + proc_is_chrooted(current)) {
47468 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47469 + return -EPERM;
47470 + }
47471 +#endif
47472 + return 0;
47473 +}
47474 +
47475 +int
47476 +gr_handle_chroot_mount(const struct dentry *dentry,
47477 + const struct vfsmount *mnt, const char *dev_name)
47478 +{
47479 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47480 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47481 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47482 + return -EPERM;
47483 + }
47484 +#endif
47485 + return 0;
47486 +}
47487 +
47488 +int
47489 +gr_handle_chroot_pivot(void)
47490 +{
47491 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47492 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47493 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47494 + return -EPERM;
47495 + }
47496 +#endif
47497 + return 0;
47498 +}
47499 +
47500 +int
47501 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47502 +{
47503 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47504 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47505 + !gr_is_outside_chroot(dentry, mnt)) {
47506 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47507 + return -EPERM;
47508 + }
47509 +#endif
47510 + return 0;
47511 +}
47512 +
47513 +int
47514 +gr_handle_chroot_caps(struct path *path)
47515 +{
47516 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47517 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47518 + (init_task.fs->root.dentry != path->dentry) &&
47519 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47520 +
47521 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47522 + const struct cred *old = current_cred();
47523 + struct cred *new = prepare_creds();
47524 + if (new == NULL)
47525 + return 1;
47526 +
47527 + new->cap_permitted = cap_drop(old->cap_permitted,
47528 + chroot_caps);
47529 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47530 + chroot_caps);
47531 + new->cap_effective = cap_drop(old->cap_effective,
47532 + chroot_caps);
47533 +
47534 + commit_creds(new);
47535 +
47536 + return 0;
47537 + }
47538 +#endif
47539 + return 0;
47540 +}
47541 +
47542 +int
47543 +gr_handle_chroot_sysctl(const int op)
47544 +{
47545 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47546 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47547 + proc_is_chrooted(current))
47548 + return -EACCES;
47549 +#endif
47550 + return 0;
47551 +}
47552 +
47553 +void
47554 +gr_handle_chroot_chdir(struct path *path)
47555 +{
47556 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47557 + if (grsec_enable_chroot_chdir)
47558 + set_fs_pwd(current->fs, path);
47559 +#endif
47560 + return;
47561 +}
47562 +
47563 +int
47564 +gr_handle_chroot_chmod(const struct dentry *dentry,
47565 + const struct vfsmount *mnt, const int mode)
47566 +{
47567 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47568 + /* allow chmod +s on directories, but not files */
47569 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47570 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47571 + proc_is_chrooted(current)) {
47572 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47573 + return -EPERM;
47574 + }
47575 +#endif
47576 + return 0;
47577 +}
47578 +
47579 +#ifdef CONFIG_SECURITY
47580 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47581 +#endif
47582 diff -urNp linux-2.6.39.4/grsecurity/grsec_disabled.c linux-2.6.39.4/grsecurity/grsec_disabled.c
47583 --- linux-2.6.39.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47584 +++ linux-2.6.39.4/grsecurity/grsec_disabled.c 2011-08-05 19:44:37.000000000 -0400
47585 @@ -0,0 +1,447 @@
47586 +#include <linux/kernel.h>
47587 +#include <linux/module.h>
47588 +#include <linux/sched.h>
47589 +#include <linux/file.h>
47590 +#include <linux/fs.h>
47591 +#include <linux/kdev_t.h>
47592 +#include <linux/net.h>
47593 +#include <linux/in.h>
47594 +#include <linux/ip.h>
47595 +#include <linux/skbuff.h>
47596 +#include <linux/sysctl.h>
47597 +
47598 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47599 +void
47600 +pax_set_initial_flags(struct linux_binprm *bprm)
47601 +{
47602 + return;
47603 +}
47604 +#endif
47605 +
47606 +#ifdef CONFIG_SYSCTL
47607 +__u32
47608 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47609 +{
47610 + return 0;
47611 +}
47612 +#endif
47613 +
47614 +#ifdef CONFIG_TASKSTATS
47615 +int gr_is_taskstats_denied(int pid)
47616 +{
47617 + return 0;
47618 +}
47619 +#endif
47620 +
47621 +int
47622 +gr_acl_is_enabled(void)
47623 +{
47624 + return 0;
47625 +}
47626 +
47627 +int
47628 +gr_handle_rawio(const struct inode *inode)
47629 +{
47630 + return 0;
47631 +}
47632 +
47633 +void
47634 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47635 +{
47636 + return;
47637 +}
47638 +
47639 +int
47640 +gr_handle_ptrace(struct task_struct *task, const long request)
47641 +{
47642 + return 0;
47643 +}
47644 +
47645 +int
47646 +gr_handle_proc_ptrace(struct task_struct *task)
47647 +{
47648 + return 0;
47649 +}
47650 +
47651 +void
47652 +gr_learn_resource(const struct task_struct *task,
47653 + const int res, const unsigned long wanted, const int gt)
47654 +{
47655 + return;
47656 +}
47657 +
47658 +int
47659 +gr_set_acls(const int type)
47660 +{
47661 + return 0;
47662 +}
47663 +
47664 +int
47665 +gr_check_hidden_task(const struct task_struct *tsk)
47666 +{
47667 + return 0;
47668 +}
47669 +
47670 +int
47671 +gr_check_protected_task(const struct task_struct *task)
47672 +{
47673 + return 0;
47674 +}
47675 +
47676 +int
47677 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47678 +{
47679 + return 0;
47680 +}
47681 +
47682 +void
47683 +gr_copy_label(struct task_struct *tsk)
47684 +{
47685 + return;
47686 +}
47687 +
47688 +void
47689 +gr_set_pax_flags(struct task_struct *task)
47690 +{
47691 + return;
47692 +}
47693 +
47694 +int
47695 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47696 + const int unsafe_share)
47697 +{
47698 + return 0;
47699 +}
47700 +
47701 +void
47702 +gr_handle_delete(const ino_t ino, const dev_t dev)
47703 +{
47704 + return;
47705 +}
47706 +
47707 +void
47708 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47709 +{
47710 + return;
47711 +}
47712 +
47713 +void
47714 +gr_handle_crash(struct task_struct *task, const int sig)
47715 +{
47716 + return;
47717 +}
47718 +
47719 +int
47720 +gr_check_crash_exec(const struct file *filp)
47721 +{
47722 + return 0;
47723 +}
47724 +
47725 +int
47726 +gr_check_crash_uid(const uid_t uid)
47727 +{
47728 + return 0;
47729 +}
47730 +
47731 +void
47732 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47733 + struct dentry *old_dentry,
47734 + struct dentry *new_dentry,
47735 + struct vfsmount *mnt, const __u8 replace)
47736 +{
47737 + return;
47738 +}
47739 +
47740 +int
47741 +gr_search_socket(const int family, const int type, const int protocol)
47742 +{
47743 + return 1;
47744 +}
47745 +
47746 +int
47747 +gr_search_connectbind(const int mode, const struct socket *sock,
47748 + const struct sockaddr_in *addr)
47749 +{
47750 + return 0;
47751 +}
47752 +
47753 +int
47754 +gr_is_capable(const int cap)
47755 +{
47756 + return 1;
47757 +}
47758 +
47759 +int
47760 +gr_is_capable_nolog(const int cap)
47761 +{
47762 + return 1;
47763 +}
47764 +
47765 +void
47766 +gr_handle_alertkill(struct task_struct *task)
47767 +{
47768 + return;
47769 +}
47770 +
47771 +__u32
47772 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47773 +{
47774 + return 1;
47775 +}
47776 +
47777 +__u32
47778 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47779 + const struct vfsmount * mnt)
47780 +{
47781 + return 1;
47782 +}
47783 +
47784 +__u32
47785 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47786 + const int fmode)
47787 +{
47788 + return 1;
47789 +}
47790 +
47791 +__u32
47792 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47793 +{
47794 + return 1;
47795 +}
47796 +
47797 +__u32
47798 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47799 +{
47800 + return 1;
47801 +}
47802 +
47803 +int
47804 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47805 + unsigned int *vm_flags)
47806 +{
47807 + return 1;
47808 +}
47809 +
47810 +__u32
47811 +gr_acl_handle_truncate(const struct dentry * dentry,
47812 + const struct vfsmount * mnt)
47813 +{
47814 + return 1;
47815 +}
47816 +
47817 +__u32
47818 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47819 +{
47820 + return 1;
47821 +}
47822 +
47823 +__u32
47824 +gr_acl_handle_access(const struct dentry * dentry,
47825 + const struct vfsmount * mnt, const int fmode)
47826 +{
47827 + return 1;
47828 +}
47829 +
47830 +__u32
47831 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47832 + mode_t mode)
47833 +{
47834 + return 1;
47835 +}
47836 +
47837 +__u32
47838 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47839 + mode_t mode)
47840 +{
47841 + return 1;
47842 +}
47843 +
47844 +__u32
47845 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47846 +{
47847 + return 1;
47848 +}
47849 +
47850 +__u32
47851 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47852 +{
47853 + return 1;
47854 +}
47855 +
47856 +void
47857 +grsecurity_init(void)
47858 +{
47859 + return;
47860 +}
47861 +
47862 +__u32
47863 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47864 + const struct dentry * parent_dentry,
47865 + const struct vfsmount * parent_mnt,
47866 + const int mode)
47867 +{
47868 + return 1;
47869 +}
47870 +
47871 +__u32
47872 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47873 + const struct dentry * parent_dentry,
47874 + const struct vfsmount * parent_mnt)
47875 +{
47876 + return 1;
47877 +}
47878 +
47879 +__u32
47880 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47881 + const struct dentry * parent_dentry,
47882 + const struct vfsmount * parent_mnt, const char *from)
47883 +{
47884 + return 1;
47885 +}
47886 +
47887 +__u32
47888 +gr_acl_handle_link(const struct dentry * new_dentry,
47889 + const struct dentry * parent_dentry,
47890 + const struct vfsmount * parent_mnt,
47891 + const struct dentry * old_dentry,
47892 + const struct vfsmount * old_mnt, const char *to)
47893 +{
47894 + return 1;
47895 +}
47896 +
47897 +int
47898 +gr_acl_handle_rename(const struct dentry *new_dentry,
47899 + const struct dentry *parent_dentry,
47900 + const struct vfsmount *parent_mnt,
47901 + const struct dentry *old_dentry,
47902 + const struct inode *old_parent_inode,
47903 + const struct vfsmount *old_mnt, const char *newname)
47904 +{
47905 + return 0;
47906 +}
47907 +
47908 +int
47909 +gr_acl_handle_filldir(const struct file *file, const char *name,
47910 + const int namelen, const ino_t ino)
47911 +{
47912 + return 1;
47913 +}
47914 +
47915 +int
47916 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47917 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47918 +{
47919 + return 1;
47920 +}
47921 +
47922 +int
47923 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47924 +{
47925 + return 0;
47926 +}
47927 +
47928 +int
47929 +gr_search_accept(const struct socket *sock)
47930 +{
47931 + return 0;
47932 +}
47933 +
47934 +int
47935 +gr_search_listen(const struct socket *sock)
47936 +{
47937 + return 0;
47938 +}
47939 +
47940 +int
47941 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47942 +{
47943 + return 0;
47944 +}
47945 +
47946 +__u32
47947 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47948 +{
47949 + return 1;
47950 +}
47951 +
47952 +__u32
47953 +gr_acl_handle_creat(const struct dentry * dentry,
47954 + const struct dentry * p_dentry,
47955 + const struct vfsmount * p_mnt, const int fmode,
47956 + const int imode)
47957 +{
47958 + return 1;
47959 +}
47960 +
47961 +void
47962 +gr_acl_handle_exit(void)
47963 +{
47964 + return;
47965 +}
47966 +
47967 +int
47968 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47969 +{
47970 + return 1;
47971 +}
47972 +
47973 +void
47974 +gr_set_role_label(const uid_t uid, const gid_t gid)
47975 +{
47976 + return;
47977 +}
47978 +
47979 +int
47980 +gr_acl_handle_procpidmem(const struct task_struct *task)
47981 +{
47982 + return 0;
47983 +}
47984 +
47985 +int
47986 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47987 +{
47988 + return 0;
47989 +}
47990 +
47991 +int
47992 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47993 +{
47994 + return 0;
47995 +}
47996 +
47997 +void
47998 +gr_set_kernel_label(struct task_struct *task)
47999 +{
48000 + return;
48001 +}
48002 +
48003 +int
48004 +gr_check_user_change(int real, int effective, int fs)
48005 +{
48006 + return 0;
48007 +}
48008 +
48009 +int
48010 +gr_check_group_change(int real, int effective, int fs)
48011 +{
48012 + return 0;
48013 +}
48014 +
48015 +int gr_acl_enable_at_secure(void)
48016 +{
48017 + return 0;
48018 +}
48019 +
48020 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48021 +{
48022 + return dentry->d_inode->i_sb->s_dev;
48023 +}
48024 +
48025 +EXPORT_SYMBOL(gr_is_capable);
48026 +EXPORT_SYMBOL(gr_is_capable_nolog);
48027 +EXPORT_SYMBOL(gr_learn_resource);
48028 +EXPORT_SYMBOL(gr_set_kernel_label);
48029 +#ifdef CONFIG_SECURITY
48030 +EXPORT_SYMBOL(gr_check_user_change);
48031 +EXPORT_SYMBOL(gr_check_group_change);
48032 +#endif
48033 diff -urNp linux-2.6.39.4/grsecurity/grsec_exec.c linux-2.6.39.4/grsecurity/grsec_exec.c
48034 --- linux-2.6.39.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
48035 +++ linux-2.6.39.4/grsecurity/grsec_exec.c 2011-08-05 19:44:37.000000000 -0400
48036 @@ -0,0 +1,146 @@
48037 +#include <linux/kernel.h>
48038 +#include <linux/sched.h>
48039 +#include <linux/file.h>
48040 +#include <linux/binfmts.h>
48041 +#include <linux/fs.h>
48042 +#include <linux/types.h>
48043 +#include <linux/grdefs.h>
48044 +#include <linux/grinternal.h>
48045 +#include <linux/capability.h>
48046 +#include <linux/compat.h>
48047 +
48048 +#include <asm/uaccess.h>
48049 +
48050 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48051 +static char gr_exec_arg_buf[132];
48052 +static DEFINE_MUTEX(gr_exec_arg_mutex);
48053 +#endif
48054 +
48055 +int
48056 +gr_handle_nproc(void)
48057 +{
48058 +#ifdef CONFIG_GRKERNSEC_EXECVE
48059 + const struct cred *cred = current_cred();
48060 + if (grsec_enable_execve && cred->user &&
48061 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
48062 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
48063 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
48064 + return -EAGAIN;
48065 + }
48066 +#endif
48067 + return 0;
48068 +}
48069 +
48070 +void
48071 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
48072 +{
48073 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48074 + char *grarg = gr_exec_arg_buf;
48075 + unsigned int i, x, execlen = 0;
48076 + char c;
48077 +
48078 + if (!((grsec_enable_execlog && grsec_enable_group &&
48079 + in_group_p(grsec_audit_gid))
48080 + || (grsec_enable_execlog && !grsec_enable_group)))
48081 + return;
48082 +
48083 + mutex_lock(&gr_exec_arg_mutex);
48084 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48085 +
48086 + if (unlikely(argv == NULL))
48087 + goto log;
48088 +
48089 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48090 + const char __user *p;
48091 + unsigned int len;
48092 +
48093 + if (copy_from_user(&p, argv + i, sizeof(p)))
48094 + goto log;
48095 + if (!p)
48096 + goto log;
48097 + len = strnlen_user(p, 128 - execlen);
48098 + if (len > 128 - execlen)
48099 + len = 128 - execlen;
48100 + else if (len > 0)
48101 + len--;
48102 + if (copy_from_user(grarg + execlen, p, len))
48103 + goto log;
48104 +
48105 + /* rewrite unprintable characters */
48106 + for (x = 0; x < len; x++) {
48107 + c = *(grarg + execlen + x);
48108 + if (c < 32 || c > 126)
48109 + *(grarg + execlen + x) = ' ';
48110 + }
48111 +
48112 + execlen += len;
48113 + *(grarg + execlen) = ' ';
48114 + *(grarg + execlen + 1) = '\0';
48115 + execlen++;
48116 + }
48117 +
48118 + log:
48119 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48120 + bprm->file->f_path.mnt, grarg);
48121 + mutex_unlock(&gr_exec_arg_mutex);
48122 +#endif
48123 + return;
48124 +}
48125 +
48126 +#ifdef CONFIG_COMPAT
48127 +void
48128 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
48129 +{
48130 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48131 + char *grarg = gr_exec_arg_buf;
48132 + unsigned int i, x, execlen = 0;
48133 + char c;
48134 +
48135 + if (!((grsec_enable_execlog && grsec_enable_group &&
48136 + in_group_p(grsec_audit_gid))
48137 + || (grsec_enable_execlog && !grsec_enable_group)))
48138 + return;
48139 +
48140 + mutex_lock(&gr_exec_arg_mutex);
48141 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48142 +
48143 + if (unlikely(argv == NULL))
48144 + goto log;
48145 +
48146 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48147 + compat_uptr_t p;
48148 + unsigned int len;
48149 +
48150 + if (get_user(p, argv + i))
48151 + goto log;
48152 + len = strnlen_user(compat_ptr(p), 128 - execlen);
48153 + if (len > 128 - execlen)
48154 + len = 128 - execlen;
48155 + else if (len > 0)
48156 + len--;
48157 + else
48158 + goto log;
48159 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
48160 + goto log;
48161 +
48162 + /* rewrite unprintable characters */
48163 + for (x = 0; x < len; x++) {
48164 + c = *(grarg + execlen + x);
48165 + if (c < 32 || c > 126)
48166 + *(grarg + execlen + x) = ' ';
48167 + }
48168 +
48169 + execlen += len;
48170 + *(grarg + execlen) = ' ';
48171 + *(grarg + execlen + 1) = '\0';
48172 + execlen++;
48173 + }
48174 +
48175 + log:
48176 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48177 + bprm->file->f_path.mnt, grarg);
48178 + mutex_unlock(&gr_exec_arg_mutex);
48179 +#endif
48180 + return;
48181 +}
48182 +#endif
48183 diff -urNp linux-2.6.39.4/grsecurity/grsec_fifo.c linux-2.6.39.4/grsecurity/grsec_fifo.c
48184 --- linux-2.6.39.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
48185 +++ linux-2.6.39.4/grsecurity/grsec_fifo.c 2011-08-05 19:44:37.000000000 -0400
48186 @@ -0,0 +1,24 @@
48187 +#include <linux/kernel.h>
48188 +#include <linux/sched.h>
48189 +#include <linux/fs.h>
48190 +#include <linux/file.h>
48191 +#include <linux/grinternal.h>
48192 +
48193 +int
48194 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48195 + const struct dentry *dir, const int flag, const int acc_mode)
48196 +{
48197 +#ifdef CONFIG_GRKERNSEC_FIFO
48198 + const struct cred *cred = current_cred();
48199 +
48200 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48201 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48202 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48203 + (cred->fsuid != dentry->d_inode->i_uid)) {
48204 + if (!inode_permission(dentry->d_inode, acc_mode))
48205 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48206 + return -EACCES;
48207 + }
48208 +#endif
48209 + return 0;
48210 +}
48211 diff -urNp linux-2.6.39.4/grsecurity/grsec_fork.c linux-2.6.39.4/grsecurity/grsec_fork.c
48212 --- linux-2.6.39.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48213 +++ linux-2.6.39.4/grsecurity/grsec_fork.c 2011-08-05 19:44:37.000000000 -0400
48214 @@ -0,0 +1,23 @@
48215 +#include <linux/kernel.h>
48216 +#include <linux/sched.h>
48217 +#include <linux/grsecurity.h>
48218 +#include <linux/grinternal.h>
48219 +#include <linux/errno.h>
48220 +
48221 +void
48222 +gr_log_forkfail(const int retval)
48223 +{
48224 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48225 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48226 + switch (retval) {
48227 + case -EAGAIN:
48228 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48229 + break;
48230 + case -ENOMEM:
48231 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48232 + break;
48233 + }
48234 + }
48235 +#endif
48236 + return;
48237 +}
48238 diff -urNp linux-2.6.39.4/grsecurity/grsec_init.c linux-2.6.39.4/grsecurity/grsec_init.c
48239 --- linux-2.6.39.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48240 +++ linux-2.6.39.4/grsecurity/grsec_init.c 2011-08-05 19:44:37.000000000 -0400
48241 @@ -0,0 +1,273 @@
48242 +#include <linux/kernel.h>
48243 +#include <linux/sched.h>
48244 +#include <linux/mm.h>
48245 +#include <linux/gracl.h>
48246 +#include <linux/slab.h>
48247 +#include <linux/vmalloc.h>
48248 +#include <linux/percpu.h>
48249 +#include <linux/module.h>
48250 +
48251 +int grsec_enable_brute;
48252 +int grsec_enable_link;
48253 +int grsec_enable_dmesg;
48254 +int grsec_enable_harden_ptrace;
48255 +int grsec_enable_fifo;
48256 +int grsec_enable_execve;
48257 +int grsec_enable_execlog;
48258 +int grsec_enable_signal;
48259 +int grsec_enable_forkfail;
48260 +int grsec_enable_audit_ptrace;
48261 +int grsec_enable_time;
48262 +int grsec_enable_audit_textrel;
48263 +int grsec_enable_group;
48264 +int grsec_audit_gid;
48265 +int grsec_enable_chdir;
48266 +int grsec_enable_mount;
48267 +int grsec_enable_rofs;
48268 +int grsec_enable_chroot_findtask;
48269 +int grsec_enable_chroot_mount;
48270 +int grsec_enable_chroot_shmat;
48271 +int grsec_enable_chroot_fchdir;
48272 +int grsec_enable_chroot_double;
48273 +int grsec_enable_chroot_pivot;
48274 +int grsec_enable_chroot_chdir;
48275 +int grsec_enable_chroot_chmod;
48276 +int grsec_enable_chroot_mknod;
48277 +int grsec_enable_chroot_nice;
48278 +int grsec_enable_chroot_execlog;
48279 +int grsec_enable_chroot_caps;
48280 +int grsec_enable_chroot_sysctl;
48281 +int grsec_enable_chroot_unix;
48282 +int grsec_enable_tpe;
48283 +int grsec_tpe_gid;
48284 +int grsec_enable_blackhole;
48285 +#ifdef CONFIG_IPV6_MODULE
48286 +EXPORT_SYMBOL(grsec_enable_blackhole);
48287 +#endif
48288 +int grsec_lastack_retries;
48289 +int grsec_enable_tpe_all;
48290 +int grsec_enable_tpe_invert;
48291 +int grsec_enable_socket_all;
48292 +int grsec_socket_all_gid;
48293 +int grsec_enable_socket_client;
48294 +int grsec_socket_client_gid;
48295 +int grsec_enable_socket_server;
48296 +int grsec_socket_server_gid;
48297 +int grsec_resource_logging;
48298 +int grsec_disable_privio;
48299 +int grsec_enable_log_rwxmaps;
48300 +int grsec_lock;
48301 +
48302 +DEFINE_SPINLOCK(grsec_alert_lock);
48303 +unsigned long grsec_alert_wtime = 0;
48304 +unsigned long grsec_alert_fyet = 0;
48305 +
48306 +DEFINE_SPINLOCK(grsec_audit_lock);
48307 +
48308 +DEFINE_RWLOCK(grsec_exec_file_lock);
48309 +
48310 +char *gr_shared_page[4];
48311 +
48312 +char *gr_alert_log_fmt;
48313 +char *gr_audit_log_fmt;
48314 +char *gr_alert_log_buf;
48315 +char *gr_audit_log_buf;
48316 +
48317 +extern struct gr_arg *gr_usermode;
48318 +extern unsigned char *gr_system_salt;
48319 +extern unsigned char *gr_system_sum;
48320 +
48321 +void __init
48322 +grsecurity_init(void)
48323 +{
48324 + int j;
48325 + /* create the per-cpu shared pages */
48326 +
48327 +#ifdef CONFIG_X86
48328 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48329 +#endif
48330 +
48331 + for (j = 0; j < 4; j++) {
48332 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48333 + if (gr_shared_page[j] == NULL) {
48334 + panic("Unable to allocate grsecurity shared page");
48335 + return;
48336 + }
48337 + }
48338 +
48339 + /* allocate log buffers */
48340 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48341 + if (!gr_alert_log_fmt) {
48342 + panic("Unable to allocate grsecurity alert log format buffer");
48343 + return;
48344 + }
48345 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48346 + if (!gr_audit_log_fmt) {
48347 + panic("Unable to allocate grsecurity audit log format buffer");
48348 + return;
48349 + }
48350 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48351 + if (!gr_alert_log_buf) {
48352 + panic("Unable to allocate grsecurity alert log buffer");
48353 + return;
48354 + }
48355 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48356 + if (!gr_audit_log_buf) {
48357 + panic("Unable to allocate grsecurity audit log buffer");
48358 + return;
48359 + }
48360 +
48361 + /* allocate memory for authentication structure */
48362 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48363 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48364 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48365 +
48366 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48367 + panic("Unable to allocate grsecurity authentication structure");
48368 + return;
48369 + }
48370 +
48371 +
48372 +#ifdef CONFIG_GRKERNSEC_IO
48373 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48374 + grsec_disable_privio = 1;
48375 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48376 + grsec_disable_privio = 1;
48377 +#else
48378 + grsec_disable_privio = 0;
48379 +#endif
48380 +#endif
48381 +
48382 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48383 + /* for backward compatibility, tpe_invert always defaults to on if
48384 + enabled in the kernel
48385 + */
48386 + grsec_enable_tpe_invert = 1;
48387 +#endif
48388 +
48389 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48390 +#ifndef CONFIG_GRKERNSEC_SYSCTL
48391 + grsec_lock = 1;
48392 +#endif
48393 +
48394 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48395 + grsec_enable_audit_textrel = 1;
48396 +#endif
48397 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48398 + grsec_enable_log_rwxmaps = 1;
48399 +#endif
48400 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48401 + grsec_enable_group = 1;
48402 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48403 +#endif
48404 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48405 + grsec_enable_chdir = 1;
48406 +#endif
48407 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48408 + grsec_enable_harden_ptrace = 1;
48409 +#endif
48410 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48411 + grsec_enable_mount = 1;
48412 +#endif
48413 +#ifdef CONFIG_GRKERNSEC_LINK
48414 + grsec_enable_link = 1;
48415 +#endif
48416 +#ifdef CONFIG_GRKERNSEC_BRUTE
48417 + grsec_enable_brute = 1;
48418 +#endif
48419 +#ifdef CONFIG_GRKERNSEC_DMESG
48420 + grsec_enable_dmesg = 1;
48421 +#endif
48422 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48423 + grsec_enable_blackhole = 1;
48424 + grsec_lastack_retries = 4;
48425 +#endif
48426 +#ifdef CONFIG_GRKERNSEC_FIFO
48427 + grsec_enable_fifo = 1;
48428 +#endif
48429 +#ifdef CONFIG_GRKERNSEC_EXECVE
48430 + grsec_enable_execve = 1;
48431 +#endif
48432 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48433 + grsec_enable_execlog = 1;
48434 +#endif
48435 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48436 + grsec_enable_signal = 1;
48437 +#endif
48438 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48439 + grsec_enable_forkfail = 1;
48440 +#endif
48441 +#ifdef CONFIG_GRKERNSEC_TIME
48442 + grsec_enable_time = 1;
48443 +#endif
48444 +#ifdef CONFIG_GRKERNSEC_RESLOG
48445 + grsec_resource_logging = 1;
48446 +#endif
48447 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48448 + grsec_enable_chroot_findtask = 1;
48449 +#endif
48450 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48451 + grsec_enable_chroot_unix = 1;
48452 +#endif
48453 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48454 + grsec_enable_chroot_mount = 1;
48455 +#endif
48456 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48457 + grsec_enable_chroot_fchdir = 1;
48458 +#endif
48459 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48460 + grsec_enable_chroot_shmat = 1;
48461 +#endif
48462 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48463 + grsec_enable_audit_ptrace = 1;
48464 +#endif
48465 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48466 + grsec_enable_chroot_double = 1;
48467 +#endif
48468 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48469 + grsec_enable_chroot_pivot = 1;
48470 +#endif
48471 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48472 + grsec_enable_chroot_chdir = 1;
48473 +#endif
48474 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48475 + grsec_enable_chroot_chmod = 1;
48476 +#endif
48477 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48478 + grsec_enable_chroot_mknod = 1;
48479 +#endif
48480 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48481 + grsec_enable_chroot_nice = 1;
48482 +#endif
48483 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48484 + grsec_enable_chroot_execlog = 1;
48485 +#endif
48486 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48487 + grsec_enable_chroot_caps = 1;
48488 +#endif
48489 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48490 + grsec_enable_chroot_sysctl = 1;
48491 +#endif
48492 +#ifdef CONFIG_GRKERNSEC_TPE
48493 + grsec_enable_tpe = 1;
48494 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48495 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48496 + grsec_enable_tpe_all = 1;
48497 +#endif
48498 +#endif
48499 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48500 + grsec_enable_socket_all = 1;
48501 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48502 +#endif
48503 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48504 + grsec_enable_socket_client = 1;
48505 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48506 +#endif
48507 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48508 + grsec_enable_socket_server = 1;
48509 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48510 +#endif
48511 +#endif
48512 +
48513 + return;
48514 +}
48515 diff -urNp linux-2.6.39.4/grsecurity/grsec_link.c linux-2.6.39.4/grsecurity/grsec_link.c
48516 --- linux-2.6.39.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48517 +++ linux-2.6.39.4/grsecurity/grsec_link.c 2011-08-05 19:44:37.000000000 -0400
48518 @@ -0,0 +1,43 @@
48519 +#include <linux/kernel.h>
48520 +#include <linux/sched.h>
48521 +#include <linux/fs.h>
48522 +#include <linux/file.h>
48523 +#include <linux/grinternal.h>
48524 +
48525 +int
48526 +gr_handle_follow_link(const struct inode *parent,
48527 + const struct inode *inode,
48528 + const struct dentry *dentry, const struct vfsmount *mnt)
48529 +{
48530 +#ifdef CONFIG_GRKERNSEC_LINK
48531 + const struct cred *cred = current_cred();
48532 +
48533 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48534 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48535 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48536 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48537 + return -EACCES;
48538 + }
48539 +#endif
48540 + return 0;
48541 +}
48542 +
48543 +int
48544 +gr_handle_hardlink(const struct dentry *dentry,
48545 + const struct vfsmount *mnt,
48546 + struct inode *inode, const int mode, const char *to)
48547 +{
48548 +#ifdef CONFIG_GRKERNSEC_LINK
48549 + const struct cred *cred = current_cred();
48550 +
48551 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48552 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48553 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48554 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48555 + !capable(CAP_FOWNER) && cred->uid) {
48556 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48557 + return -EPERM;
48558 + }
48559 +#endif
48560 + return 0;
48561 +}
48562 diff -urNp linux-2.6.39.4/grsecurity/grsec_log.c linux-2.6.39.4/grsecurity/grsec_log.c
48563 --- linux-2.6.39.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48564 +++ linux-2.6.39.4/grsecurity/grsec_log.c 2011-08-05 19:44:37.000000000 -0400
48565 @@ -0,0 +1,310 @@
48566 +#include <linux/kernel.h>
48567 +#include <linux/sched.h>
48568 +#include <linux/file.h>
48569 +#include <linux/tty.h>
48570 +#include <linux/fs.h>
48571 +#include <linux/grinternal.h>
48572 +
48573 +#ifdef CONFIG_TREE_PREEMPT_RCU
48574 +#define DISABLE_PREEMPT() preempt_disable()
48575 +#define ENABLE_PREEMPT() preempt_enable()
48576 +#else
48577 +#define DISABLE_PREEMPT()
48578 +#define ENABLE_PREEMPT()
48579 +#endif
48580 +
48581 +#define BEGIN_LOCKS(x) \
48582 + DISABLE_PREEMPT(); \
48583 + rcu_read_lock(); \
48584 + read_lock(&tasklist_lock); \
48585 + read_lock(&grsec_exec_file_lock); \
48586 + if (x != GR_DO_AUDIT) \
48587 + spin_lock(&grsec_alert_lock); \
48588 + else \
48589 + spin_lock(&grsec_audit_lock)
48590 +
48591 +#define END_LOCKS(x) \
48592 + if (x != GR_DO_AUDIT) \
48593 + spin_unlock(&grsec_alert_lock); \
48594 + else \
48595 + spin_unlock(&grsec_audit_lock); \
48596 + read_unlock(&grsec_exec_file_lock); \
48597 + read_unlock(&tasklist_lock); \
48598 + rcu_read_unlock(); \
48599 + ENABLE_PREEMPT(); \
48600 + if (x == GR_DONT_AUDIT) \
48601 + gr_handle_alertkill(current)
48602 +
48603 +enum {
48604 + FLOODING,
48605 + NO_FLOODING
48606 +};
48607 +
48608 +extern char *gr_alert_log_fmt;
48609 +extern char *gr_audit_log_fmt;
48610 +extern char *gr_alert_log_buf;
48611 +extern char *gr_audit_log_buf;
48612 +
48613 +static int gr_log_start(int audit)
48614 +{
48615 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48616 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48617 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48618 +
48619 + if (audit == GR_DO_AUDIT)
48620 + goto set_fmt;
48621 +
48622 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48623 + grsec_alert_wtime = jiffies;
48624 + grsec_alert_fyet = 0;
48625 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48626 + grsec_alert_fyet++;
48627 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48628 + grsec_alert_wtime = jiffies;
48629 + grsec_alert_fyet++;
48630 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48631 + return FLOODING;
48632 + } else return FLOODING;
48633 +
48634 +set_fmt:
48635 + memset(buf, 0, PAGE_SIZE);
48636 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48637 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48638 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48639 + } else if (current->signal->curr_ip) {
48640 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48641 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48642 + } else if (gr_acl_is_enabled()) {
48643 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48644 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48645 + } else {
48646 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48647 + strcpy(buf, fmt);
48648 + }
48649 +
48650 + return NO_FLOODING;
48651 +}
48652 +
48653 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48654 + __attribute__ ((format (printf, 2, 0)));
48655 +
48656 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48657 +{
48658 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48659 + unsigned int len = strlen(buf);
48660 +
48661 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48662 +
48663 + return;
48664 +}
48665 +
48666 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48667 + __attribute__ ((format (printf, 2, 3)));
48668 +
48669 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48670 +{
48671 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48672 + unsigned int len = strlen(buf);
48673 + va_list ap;
48674 +
48675 + va_start(ap, msg);
48676 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48677 + va_end(ap);
48678 +
48679 + return;
48680 +}
48681 +
48682 +static void gr_log_end(int audit)
48683 +{
48684 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48685 + unsigned int len = strlen(buf);
48686 +
48687 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48688 + printk("%s\n", buf);
48689 +
48690 + return;
48691 +}
48692 +
48693 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48694 +{
48695 + int logtype;
48696 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48697 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48698 + void *voidptr = NULL;
48699 + int num1 = 0, num2 = 0;
48700 + unsigned long ulong1 = 0, ulong2 = 0;
48701 + struct dentry *dentry = NULL;
48702 + struct vfsmount *mnt = NULL;
48703 + struct file *file = NULL;
48704 + struct task_struct *task = NULL;
48705 + const struct cred *cred, *pcred;
48706 + va_list ap;
48707 +
48708 + BEGIN_LOCKS(audit);
48709 + logtype = gr_log_start(audit);
48710 + if (logtype == FLOODING) {
48711 + END_LOCKS(audit);
48712 + return;
48713 + }
48714 + va_start(ap, argtypes);
48715 + switch (argtypes) {
48716 + case GR_TTYSNIFF:
48717 + task = va_arg(ap, struct task_struct *);
48718 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48719 + break;
48720 + case GR_SYSCTL_HIDDEN:
48721 + str1 = va_arg(ap, char *);
48722 + gr_log_middle_varargs(audit, msg, result, str1);
48723 + break;
48724 + case GR_RBAC:
48725 + dentry = va_arg(ap, struct dentry *);
48726 + mnt = va_arg(ap, struct vfsmount *);
48727 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48728 + break;
48729 + case GR_RBAC_STR:
48730 + dentry = va_arg(ap, struct dentry *);
48731 + mnt = va_arg(ap, struct vfsmount *);
48732 + str1 = va_arg(ap, char *);
48733 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48734 + break;
48735 + case GR_STR_RBAC:
48736 + str1 = va_arg(ap, char *);
48737 + dentry = va_arg(ap, struct dentry *);
48738 + mnt = va_arg(ap, struct vfsmount *);
48739 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48740 + break;
48741 + case GR_RBAC_MODE2:
48742 + dentry = va_arg(ap, struct dentry *);
48743 + mnt = va_arg(ap, struct vfsmount *);
48744 + str1 = va_arg(ap, char *);
48745 + str2 = va_arg(ap, char *);
48746 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48747 + break;
48748 + case GR_RBAC_MODE3:
48749 + dentry = va_arg(ap, struct dentry *);
48750 + mnt = va_arg(ap, struct vfsmount *);
48751 + str1 = va_arg(ap, char *);
48752 + str2 = va_arg(ap, char *);
48753 + str3 = va_arg(ap, char *);
48754 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48755 + break;
48756 + case GR_FILENAME:
48757 + dentry = va_arg(ap, struct dentry *);
48758 + mnt = va_arg(ap, struct vfsmount *);
48759 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48760 + break;
48761 + case GR_STR_FILENAME:
48762 + str1 = va_arg(ap, char *);
48763 + dentry = va_arg(ap, struct dentry *);
48764 + mnt = va_arg(ap, struct vfsmount *);
48765 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48766 + break;
48767 + case GR_FILENAME_STR:
48768 + dentry = va_arg(ap, struct dentry *);
48769 + mnt = va_arg(ap, struct vfsmount *);
48770 + str1 = va_arg(ap, char *);
48771 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48772 + break;
48773 + case GR_FILENAME_TWO_INT:
48774 + dentry = va_arg(ap, struct dentry *);
48775 + mnt = va_arg(ap, struct vfsmount *);
48776 + num1 = va_arg(ap, int);
48777 + num2 = va_arg(ap, int);
48778 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48779 + break;
48780 + case GR_FILENAME_TWO_INT_STR:
48781 + dentry = va_arg(ap, struct dentry *);
48782 + mnt = va_arg(ap, struct vfsmount *);
48783 + num1 = va_arg(ap, int);
48784 + num2 = va_arg(ap, int);
48785 + str1 = va_arg(ap, char *);
48786 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48787 + break;
48788 + case GR_TEXTREL:
48789 + file = va_arg(ap, struct file *);
48790 + ulong1 = va_arg(ap, unsigned long);
48791 + ulong2 = va_arg(ap, unsigned long);
48792 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48793 + break;
48794 + case GR_PTRACE:
48795 + task = va_arg(ap, struct task_struct *);
48796 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48797 + break;
48798 + case GR_RESOURCE:
48799 + task = va_arg(ap, struct task_struct *);
48800 + cred = __task_cred(task);
48801 + pcred = __task_cred(task->real_parent);
48802 + ulong1 = va_arg(ap, unsigned long);
48803 + str1 = va_arg(ap, char *);
48804 + ulong2 = va_arg(ap, unsigned long);
48805 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48806 + break;
48807 + case GR_CAP:
48808 + task = va_arg(ap, struct task_struct *);
48809 + cred = __task_cred(task);
48810 + pcred = __task_cred(task->real_parent);
48811 + str1 = va_arg(ap, char *);
48812 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48813 + break;
48814 + case GR_SIG:
48815 + str1 = va_arg(ap, char *);
48816 + voidptr = va_arg(ap, void *);
48817 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48818 + break;
48819 + case GR_SIG2:
48820 + task = va_arg(ap, struct task_struct *);
48821 + cred = __task_cred(task);
48822 + pcred = __task_cred(task->real_parent);
48823 + num1 = va_arg(ap, int);
48824 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48825 + break;
48826 + case GR_CRASH1:
48827 + task = va_arg(ap, struct task_struct *);
48828 + cred = __task_cred(task);
48829 + pcred = __task_cred(task->real_parent);
48830 + ulong1 = va_arg(ap, unsigned long);
48831 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48832 + break;
48833 + case GR_CRASH2:
48834 + task = va_arg(ap, struct task_struct *);
48835 + cred = __task_cred(task);
48836 + pcred = __task_cred(task->real_parent);
48837 + ulong1 = va_arg(ap, unsigned long);
48838 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48839 + break;
48840 + case GR_RWXMAP:
48841 + file = va_arg(ap, struct file *);
48842 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48843 + break;
48844 + case GR_PSACCT:
48845 + {
48846 + unsigned int wday, cday;
48847 + __u8 whr, chr;
48848 + __u8 wmin, cmin;
48849 + __u8 wsec, csec;
48850 + char cur_tty[64] = { 0 };
48851 + char parent_tty[64] = { 0 };
48852 +
48853 + task = va_arg(ap, struct task_struct *);
48854 + wday = va_arg(ap, unsigned int);
48855 + cday = va_arg(ap, unsigned int);
48856 + whr = va_arg(ap, int);
48857 + chr = va_arg(ap, int);
48858 + wmin = va_arg(ap, int);
48859 + cmin = va_arg(ap, int);
48860 + wsec = va_arg(ap, int);
48861 + csec = va_arg(ap, int);
48862 + ulong1 = va_arg(ap, unsigned long);
48863 + cred = __task_cred(task);
48864 + pcred = __task_cred(task->real_parent);
48865 +
48866 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48867 + }
48868 + break;
48869 + default:
48870 + gr_log_middle(audit, msg, ap);
48871 + }
48872 + va_end(ap);
48873 + gr_log_end(audit);
48874 + END_LOCKS(audit);
48875 +}
48876 diff -urNp linux-2.6.39.4/grsecurity/grsec_mem.c linux-2.6.39.4/grsecurity/grsec_mem.c
48877 --- linux-2.6.39.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48878 +++ linux-2.6.39.4/grsecurity/grsec_mem.c 2011-08-05 19:44:37.000000000 -0400
48879 @@ -0,0 +1,33 @@
48880 +#include <linux/kernel.h>
48881 +#include <linux/sched.h>
48882 +#include <linux/mm.h>
48883 +#include <linux/mman.h>
48884 +#include <linux/grinternal.h>
48885 +
48886 +void
48887 +gr_handle_ioperm(void)
48888 +{
48889 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48890 + return;
48891 +}
48892 +
48893 +void
48894 +gr_handle_iopl(void)
48895 +{
48896 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48897 + return;
48898 +}
48899 +
48900 +void
48901 +gr_handle_mem_readwrite(u64 from, u64 to)
48902 +{
48903 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48904 + return;
48905 +}
48906 +
48907 +void
48908 +gr_handle_vm86(void)
48909 +{
48910 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48911 + return;
48912 +}
48913 diff -urNp linux-2.6.39.4/grsecurity/grsec_mount.c linux-2.6.39.4/grsecurity/grsec_mount.c
48914 --- linux-2.6.39.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48915 +++ linux-2.6.39.4/grsecurity/grsec_mount.c 2011-08-05 19:44:37.000000000 -0400
48916 @@ -0,0 +1,62 @@
48917 +#include <linux/kernel.h>
48918 +#include <linux/sched.h>
48919 +#include <linux/mount.h>
48920 +#include <linux/grsecurity.h>
48921 +#include <linux/grinternal.h>
48922 +
48923 +void
48924 +gr_log_remount(const char *devname, const int retval)
48925 +{
48926 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48927 + if (grsec_enable_mount && (retval >= 0))
48928 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48929 +#endif
48930 + return;
48931 +}
48932 +
48933 +void
48934 +gr_log_unmount(const char *devname, const int retval)
48935 +{
48936 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48937 + if (grsec_enable_mount && (retval >= 0))
48938 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48939 +#endif
48940 + return;
48941 +}
48942 +
48943 +void
48944 +gr_log_mount(const char *from, const char *to, const int retval)
48945 +{
48946 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48947 + if (grsec_enable_mount && (retval >= 0))
48948 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48949 +#endif
48950 + return;
48951 +}
48952 +
48953 +int
48954 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48955 +{
48956 +#ifdef CONFIG_GRKERNSEC_ROFS
48957 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48958 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48959 + return -EPERM;
48960 + } else
48961 + return 0;
48962 +#endif
48963 + return 0;
48964 +}
48965 +
48966 +int
48967 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48968 +{
48969 +#ifdef CONFIG_GRKERNSEC_ROFS
48970 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48971 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48972 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48973 + return -EPERM;
48974 + } else
48975 + return 0;
48976 +#endif
48977 + return 0;
48978 +}
48979 diff -urNp linux-2.6.39.4/grsecurity/grsec_pax.c linux-2.6.39.4/grsecurity/grsec_pax.c
48980 --- linux-2.6.39.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48981 +++ linux-2.6.39.4/grsecurity/grsec_pax.c 2011-08-05 19:44:37.000000000 -0400
48982 @@ -0,0 +1,36 @@
48983 +#include <linux/kernel.h>
48984 +#include <linux/sched.h>
48985 +#include <linux/mm.h>
48986 +#include <linux/file.h>
48987 +#include <linux/grinternal.h>
48988 +#include <linux/grsecurity.h>
48989 +
48990 +void
48991 +gr_log_textrel(struct vm_area_struct * vma)
48992 +{
48993 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48994 + if (grsec_enable_audit_textrel)
48995 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48996 +#endif
48997 + return;
48998 +}
48999 +
49000 +void
49001 +gr_log_rwxmmap(struct file *file)
49002 +{
49003 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49004 + if (grsec_enable_log_rwxmaps)
49005 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
49006 +#endif
49007 + return;
49008 +}
49009 +
49010 +void
49011 +gr_log_rwxmprotect(struct file *file)
49012 +{
49013 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49014 + if (grsec_enable_log_rwxmaps)
49015 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
49016 +#endif
49017 + return;
49018 +}
49019 diff -urNp linux-2.6.39.4/grsecurity/grsec_ptrace.c linux-2.6.39.4/grsecurity/grsec_ptrace.c
49020 --- linux-2.6.39.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
49021 +++ linux-2.6.39.4/grsecurity/grsec_ptrace.c 2011-08-05 19:44:37.000000000 -0400
49022 @@ -0,0 +1,14 @@
49023 +#include <linux/kernel.h>
49024 +#include <linux/sched.h>
49025 +#include <linux/grinternal.h>
49026 +#include <linux/grsecurity.h>
49027 +
49028 +void
49029 +gr_audit_ptrace(struct task_struct *task)
49030 +{
49031 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49032 + if (grsec_enable_audit_ptrace)
49033 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
49034 +#endif
49035 + return;
49036 +}
49037 diff -urNp linux-2.6.39.4/grsecurity/grsec_sig.c linux-2.6.39.4/grsecurity/grsec_sig.c
49038 --- linux-2.6.39.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
49039 +++ linux-2.6.39.4/grsecurity/grsec_sig.c 2011-08-05 19:44:37.000000000 -0400
49040 @@ -0,0 +1,206 @@
49041 +#include <linux/kernel.h>
49042 +#include <linux/sched.h>
49043 +#include <linux/delay.h>
49044 +#include <linux/grsecurity.h>
49045 +#include <linux/grinternal.h>
49046 +#include <linux/hardirq.h>
49047 +
49048 +char *signames[] = {
49049 + [SIGSEGV] = "Segmentation fault",
49050 + [SIGILL] = "Illegal instruction",
49051 + [SIGABRT] = "Abort",
49052 + [SIGBUS] = "Invalid alignment/Bus error"
49053 +};
49054 +
49055 +void
49056 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
49057 +{
49058 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49059 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
49060 + (sig == SIGABRT) || (sig == SIGBUS))) {
49061 + if (t->pid == current->pid) {
49062 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
49063 + } else {
49064 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
49065 + }
49066 + }
49067 +#endif
49068 + return;
49069 +}
49070 +
49071 +int
49072 +gr_handle_signal(const struct task_struct *p, const int sig)
49073 +{
49074 +#ifdef CONFIG_GRKERNSEC
49075 + if (current->pid > 1 && gr_check_protected_task(p)) {
49076 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
49077 + return -EPERM;
49078 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
49079 + return -EPERM;
49080 + }
49081 +#endif
49082 + return 0;
49083 +}
49084 +
49085 +#ifdef CONFIG_GRKERNSEC
49086 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
49087 +
49088 +int gr_fake_force_sig(int sig, struct task_struct *t)
49089 +{
49090 + unsigned long int flags;
49091 + int ret, blocked, ignored;
49092 + struct k_sigaction *action;
49093 +
49094 + spin_lock_irqsave(&t->sighand->siglock, flags);
49095 + action = &t->sighand->action[sig-1];
49096 + ignored = action->sa.sa_handler == SIG_IGN;
49097 + blocked = sigismember(&t->blocked, sig);
49098 + if (blocked || ignored) {
49099 + action->sa.sa_handler = SIG_DFL;
49100 + if (blocked) {
49101 + sigdelset(&t->blocked, sig);
49102 + recalc_sigpending_and_wake(t);
49103 + }
49104 + }
49105 + if (action->sa.sa_handler == SIG_DFL)
49106 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
49107 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
49108 +
49109 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
49110 +
49111 + return ret;
49112 +}
49113 +#endif
49114 +
49115 +#ifdef CONFIG_GRKERNSEC_BRUTE
49116 +#define GR_USER_BAN_TIME (15 * 60)
49117 +
49118 +static int __get_dumpable(unsigned long mm_flags)
49119 +{
49120 + int ret;
49121 +
49122 + ret = mm_flags & MMF_DUMPABLE_MASK;
49123 + return (ret >= 2) ? 2 : ret;
49124 +}
49125 +#endif
49126 +
49127 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
49128 +{
49129 +#ifdef CONFIG_GRKERNSEC_BRUTE
49130 + uid_t uid = 0;
49131 +
49132 + if (!grsec_enable_brute)
49133 + return;
49134 +
49135 + rcu_read_lock();
49136 + read_lock(&tasklist_lock);
49137 + read_lock(&grsec_exec_file_lock);
49138 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
49139 + p->real_parent->brute = 1;
49140 + else {
49141 + const struct cred *cred = __task_cred(p), *cred2;
49142 + struct task_struct *tsk, *tsk2;
49143 +
49144 + if (!__get_dumpable(mm_flags) && cred->uid) {
49145 + struct user_struct *user;
49146 +
49147 + uid = cred->uid;
49148 +
49149 + /* this is put upon execution past expiration */
49150 + user = find_user(uid);
49151 + if (user == NULL)
49152 + goto unlock;
49153 + user->banned = 1;
49154 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
49155 + if (user->ban_expires == ~0UL)
49156 + user->ban_expires--;
49157 +
49158 + do_each_thread(tsk2, tsk) {
49159 + cred2 = __task_cred(tsk);
49160 + if (tsk != p && cred2->uid == uid)
49161 + gr_fake_force_sig(SIGKILL, tsk);
49162 + } while_each_thread(tsk2, tsk);
49163 + }
49164 + }
49165 +unlock:
49166 + read_unlock(&grsec_exec_file_lock);
49167 + read_unlock(&tasklist_lock);
49168 + rcu_read_unlock();
49169 +
49170 + if (uid)
49171 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
49172 +
49173 +#endif
49174 + return;
49175 +}
49176 +
49177 +void gr_handle_brute_check(void)
49178 +{
49179 +#ifdef CONFIG_GRKERNSEC_BRUTE
49180 + if (current->brute)
49181 + msleep(30 * 1000);
49182 +#endif
49183 + return;
49184 +}
49185 +
49186 +void gr_handle_kernel_exploit(void)
49187 +{
49188 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
49189 + const struct cred *cred;
49190 + struct task_struct *tsk, *tsk2;
49191 + struct user_struct *user;
49192 + uid_t uid;
49193 +
49194 + if (in_irq() || in_serving_softirq() || in_nmi())
49195 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49196 +
49197 + uid = current_uid();
49198 +
49199 + if (uid == 0)
49200 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
49201 + else {
49202 + /* kill all the processes of this user, hold a reference
49203 + to their creds struct, and prevent them from creating
49204 + another process until system reset
49205 + */
49206 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49207 + /* we intentionally leak this ref */
49208 + user = get_uid(current->cred->user);
49209 + if (user) {
49210 + user->banned = 1;
49211 + user->ban_expires = ~0UL;
49212 + }
49213 +
49214 + read_lock(&tasklist_lock);
49215 + do_each_thread(tsk2, tsk) {
49216 + cred = __task_cred(tsk);
49217 + if (cred->uid == uid)
49218 + gr_fake_force_sig(SIGKILL, tsk);
49219 + } while_each_thread(tsk2, tsk);
49220 + read_unlock(&tasklist_lock);
49221 + }
49222 +#endif
49223 +}
49224 +
49225 +int __gr_process_user_ban(struct user_struct *user)
49226 +{
49227 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49228 + if (unlikely(user->banned)) {
49229 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49230 + user->banned = 0;
49231 + user->ban_expires = 0;
49232 + free_uid(user);
49233 + } else
49234 + return -EPERM;
49235 + }
49236 +#endif
49237 + return 0;
49238 +}
49239 +
49240 +int gr_process_user_ban(void)
49241 +{
49242 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49243 + return __gr_process_user_ban(current->cred->user);
49244 +#endif
49245 + return 0;
49246 +}
49247 diff -urNp linux-2.6.39.4/grsecurity/grsec_sock.c linux-2.6.39.4/grsecurity/grsec_sock.c
49248 --- linux-2.6.39.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49249 +++ linux-2.6.39.4/grsecurity/grsec_sock.c 2011-08-05 19:44:37.000000000 -0400
49250 @@ -0,0 +1,244 @@
49251 +#include <linux/kernel.h>
49252 +#include <linux/module.h>
49253 +#include <linux/sched.h>
49254 +#include <linux/file.h>
49255 +#include <linux/net.h>
49256 +#include <linux/in.h>
49257 +#include <linux/ip.h>
49258 +#include <net/sock.h>
49259 +#include <net/inet_sock.h>
49260 +#include <linux/grsecurity.h>
49261 +#include <linux/grinternal.h>
49262 +#include <linux/gracl.h>
49263 +
49264 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49265 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49266 +
49267 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
49268 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
49269 +
49270 +#ifdef CONFIG_UNIX_MODULE
49271 +EXPORT_SYMBOL(gr_acl_handle_unix);
49272 +EXPORT_SYMBOL(gr_acl_handle_mknod);
49273 +EXPORT_SYMBOL(gr_handle_chroot_unix);
49274 +EXPORT_SYMBOL(gr_handle_create);
49275 +#endif
49276 +
49277 +#ifdef CONFIG_GRKERNSEC
49278 +#define gr_conn_table_size 32749
49279 +struct conn_table_entry {
49280 + struct conn_table_entry *next;
49281 + struct signal_struct *sig;
49282 +};
49283 +
49284 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49285 +DEFINE_SPINLOCK(gr_conn_table_lock);
49286 +
49287 +extern const char * gr_socktype_to_name(unsigned char type);
49288 +extern const char * gr_proto_to_name(unsigned char proto);
49289 +extern const char * gr_sockfamily_to_name(unsigned char family);
49290 +
49291 +static __inline__ int
49292 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49293 +{
49294 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49295 +}
49296 +
49297 +static __inline__ int
49298 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49299 + __u16 sport, __u16 dport)
49300 +{
49301 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49302 + sig->gr_sport == sport && sig->gr_dport == dport))
49303 + return 1;
49304 + else
49305 + return 0;
49306 +}
49307 +
49308 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49309 +{
49310 + struct conn_table_entry **match;
49311 + unsigned int index;
49312 +
49313 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49314 + sig->gr_sport, sig->gr_dport,
49315 + gr_conn_table_size);
49316 +
49317 + newent->sig = sig;
49318 +
49319 + match = &gr_conn_table[index];
49320 + newent->next = *match;
49321 + *match = newent;
49322 +
49323 + return;
49324 +}
49325 +
49326 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49327 +{
49328 + struct conn_table_entry *match, *last = NULL;
49329 + unsigned int index;
49330 +
49331 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49332 + sig->gr_sport, sig->gr_dport,
49333 + gr_conn_table_size);
49334 +
49335 + match = gr_conn_table[index];
49336 + while (match && !conn_match(match->sig,
49337 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49338 + sig->gr_dport)) {
49339 + last = match;
49340 + match = match->next;
49341 + }
49342 +
49343 + if (match) {
49344 + if (last)
49345 + last->next = match->next;
49346 + else
49347 + gr_conn_table[index] = NULL;
49348 + kfree(match);
49349 + }
49350 +
49351 + return;
49352 +}
49353 +
49354 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49355 + __u16 sport, __u16 dport)
49356 +{
49357 + struct conn_table_entry *match;
49358 + unsigned int index;
49359 +
49360 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49361 +
49362 + match = gr_conn_table[index];
49363 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49364 + match = match->next;
49365 +
49366 + if (match)
49367 + return match->sig;
49368 + else
49369 + return NULL;
49370 +}
49371 +
49372 +#endif
49373 +
49374 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49375 +{
49376 +#ifdef CONFIG_GRKERNSEC
49377 + struct signal_struct *sig = task->signal;
49378 + struct conn_table_entry *newent;
49379 +
49380 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49381 + if (newent == NULL)
49382 + return;
49383 + /* no bh lock needed since we are called with bh disabled */
49384 + spin_lock(&gr_conn_table_lock);
49385 + gr_del_task_from_ip_table_nolock(sig);
49386 + sig->gr_saddr = inet->inet_rcv_saddr;
49387 + sig->gr_daddr = inet->inet_daddr;
49388 + sig->gr_sport = inet->inet_sport;
49389 + sig->gr_dport = inet->inet_dport;
49390 + gr_add_to_task_ip_table_nolock(sig, newent);
49391 + spin_unlock(&gr_conn_table_lock);
49392 +#endif
49393 + return;
49394 +}
49395 +
49396 +void gr_del_task_from_ip_table(struct task_struct *task)
49397 +{
49398 +#ifdef CONFIG_GRKERNSEC
49399 + spin_lock_bh(&gr_conn_table_lock);
49400 + gr_del_task_from_ip_table_nolock(task->signal);
49401 + spin_unlock_bh(&gr_conn_table_lock);
49402 +#endif
49403 + return;
49404 +}
49405 +
49406 +void
49407 +gr_attach_curr_ip(const struct sock *sk)
49408 +{
49409 +#ifdef CONFIG_GRKERNSEC
49410 + struct signal_struct *p, *set;
49411 + const struct inet_sock *inet = inet_sk(sk);
49412 +
49413 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49414 + return;
49415 +
49416 + set = current->signal;
49417 +
49418 + spin_lock_bh(&gr_conn_table_lock);
49419 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49420 + inet->inet_dport, inet->inet_sport);
49421 + if (unlikely(p != NULL)) {
49422 + set->curr_ip = p->curr_ip;
49423 + set->used_accept = 1;
49424 + gr_del_task_from_ip_table_nolock(p);
49425 + spin_unlock_bh(&gr_conn_table_lock);
49426 + return;
49427 + }
49428 + spin_unlock_bh(&gr_conn_table_lock);
49429 +
49430 + set->curr_ip = inet->inet_daddr;
49431 + set->used_accept = 1;
49432 +#endif
49433 + return;
49434 +}
49435 +
49436 +int
49437 +gr_handle_sock_all(const int family, const int type, const int protocol)
49438 +{
49439 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49440 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49441 + (family != AF_UNIX)) {
49442 + if (family == AF_INET)
49443 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49444 + else
49445 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49446 + return -EACCES;
49447 + }
49448 +#endif
49449 + return 0;
49450 +}
49451 +
49452 +int
49453 +gr_handle_sock_server(const struct sockaddr *sck)
49454 +{
49455 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49456 + if (grsec_enable_socket_server &&
49457 + in_group_p(grsec_socket_server_gid) &&
49458 + sck && (sck->sa_family != AF_UNIX) &&
49459 + (sck->sa_family != AF_LOCAL)) {
49460 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49461 + return -EACCES;
49462 + }
49463 +#endif
49464 + return 0;
49465 +}
49466 +
49467 +int
49468 +gr_handle_sock_server_other(const struct sock *sck)
49469 +{
49470 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49471 + if (grsec_enable_socket_server &&
49472 + in_group_p(grsec_socket_server_gid) &&
49473 + sck && (sck->sk_family != AF_UNIX) &&
49474 + (sck->sk_family != AF_LOCAL)) {
49475 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49476 + return -EACCES;
49477 + }
49478 +#endif
49479 + return 0;
49480 +}
49481 +
49482 +int
49483 +gr_handle_sock_client(const struct sockaddr *sck)
49484 +{
49485 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49486 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49487 + sck && (sck->sa_family != AF_UNIX) &&
49488 + (sck->sa_family != AF_LOCAL)) {
49489 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49490 + return -EACCES;
49491 + }
49492 +#endif
49493 + return 0;
49494 +}
49495 diff -urNp linux-2.6.39.4/grsecurity/grsec_sysctl.c linux-2.6.39.4/grsecurity/grsec_sysctl.c
49496 --- linux-2.6.39.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49497 +++ linux-2.6.39.4/grsecurity/grsec_sysctl.c 2011-08-05 19:44:37.000000000 -0400
49498 @@ -0,0 +1,442 @@
49499 +#include <linux/kernel.h>
49500 +#include <linux/sched.h>
49501 +#include <linux/sysctl.h>
49502 +#include <linux/grsecurity.h>
49503 +#include <linux/grinternal.h>
49504 +
49505 +int
49506 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49507 +{
49508 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49509 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49510 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49511 + return -EACCES;
49512 + }
49513 +#endif
49514 + return 0;
49515 +}
49516 +
49517 +#ifdef CONFIG_GRKERNSEC_ROFS
49518 +static int __maybe_unused one = 1;
49519 +#endif
49520 +
49521 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49522 +struct ctl_table grsecurity_table[] = {
49523 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49524 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49525 +#ifdef CONFIG_GRKERNSEC_IO
49526 + {
49527 + .procname = "disable_priv_io",
49528 + .data = &grsec_disable_privio,
49529 + .maxlen = sizeof(int),
49530 + .mode = 0600,
49531 + .proc_handler = &proc_dointvec,
49532 + },
49533 +#endif
49534 +#endif
49535 +#ifdef CONFIG_GRKERNSEC_LINK
49536 + {
49537 + .procname = "linking_restrictions",
49538 + .data = &grsec_enable_link,
49539 + .maxlen = sizeof(int),
49540 + .mode = 0600,
49541 + .proc_handler = &proc_dointvec,
49542 + },
49543 +#endif
49544 +#ifdef CONFIG_GRKERNSEC_BRUTE
49545 + {
49546 + .procname = "deter_bruteforce",
49547 + .data = &grsec_enable_brute,
49548 + .maxlen = sizeof(int),
49549 + .mode = 0600,
49550 + .proc_handler = &proc_dointvec,
49551 + },
49552 +#endif
49553 +#ifdef CONFIG_GRKERNSEC_FIFO
49554 + {
49555 + .procname = "fifo_restrictions",
49556 + .data = &grsec_enable_fifo,
49557 + .maxlen = sizeof(int),
49558 + .mode = 0600,
49559 + .proc_handler = &proc_dointvec,
49560 + },
49561 +#endif
49562 +#ifdef CONFIG_GRKERNSEC_EXECVE
49563 + {
49564 + .procname = "execve_limiting",
49565 + .data = &grsec_enable_execve,
49566 + .maxlen = sizeof(int),
49567 + .mode = 0600,
49568 + .proc_handler = &proc_dointvec,
49569 + },
49570 +#endif
49571 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49572 + {
49573 + .procname = "ip_blackhole",
49574 + .data = &grsec_enable_blackhole,
49575 + .maxlen = sizeof(int),
49576 + .mode = 0600,
49577 + .proc_handler = &proc_dointvec,
49578 + },
49579 + {
49580 + .procname = "lastack_retries",
49581 + .data = &grsec_lastack_retries,
49582 + .maxlen = sizeof(int),
49583 + .mode = 0600,
49584 + .proc_handler = &proc_dointvec,
49585 + },
49586 +#endif
49587 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49588 + {
49589 + .procname = "exec_logging",
49590 + .data = &grsec_enable_execlog,
49591 + .maxlen = sizeof(int),
49592 + .mode = 0600,
49593 + .proc_handler = &proc_dointvec,
49594 + },
49595 +#endif
49596 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49597 + {
49598 + .procname = "rwxmap_logging",
49599 + .data = &grsec_enable_log_rwxmaps,
49600 + .maxlen = sizeof(int),
49601 + .mode = 0600,
49602 + .proc_handler = &proc_dointvec,
49603 + },
49604 +#endif
49605 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49606 + {
49607 + .procname = "signal_logging",
49608 + .data = &grsec_enable_signal,
49609 + .maxlen = sizeof(int),
49610 + .mode = 0600,
49611 + .proc_handler = &proc_dointvec,
49612 + },
49613 +#endif
49614 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49615 + {
49616 + .procname = "forkfail_logging",
49617 + .data = &grsec_enable_forkfail,
49618 + .maxlen = sizeof(int),
49619 + .mode = 0600,
49620 + .proc_handler = &proc_dointvec,
49621 + },
49622 +#endif
49623 +#ifdef CONFIG_GRKERNSEC_TIME
49624 + {
49625 + .procname = "timechange_logging",
49626 + .data = &grsec_enable_time,
49627 + .maxlen = sizeof(int),
49628 + .mode = 0600,
49629 + .proc_handler = &proc_dointvec,
49630 + },
49631 +#endif
49632 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49633 + {
49634 + .procname = "chroot_deny_shmat",
49635 + .data = &grsec_enable_chroot_shmat,
49636 + .maxlen = sizeof(int),
49637 + .mode = 0600,
49638 + .proc_handler = &proc_dointvec,
49639 + },
49640 +#endif
49641 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49642 + {
49643 + .procname = "chroot_deny_unix",
49644 + .data = &grsec_enable_chroot_unix,
49645 + .maxlen = sizeof(int),
49646 + .mode = 0600,
49647 + .proc_handler = &proc_dointvec,
49648 + },
49649 +#endif
49650 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49651 + {
49652 + .procname = "chroot_deny_mount",
49653 + .data = &grsec_enable_chroot_mount,
49654 + .maxlen = sizeof(int),
49655 + .mode = 0600,
49656 + .proc_handler = &proc_dointvec,
49657 + },
49658 +#endif
49659 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49660 + {
49661 + .procname = "chroot_deny_fchdir",
49662 + .data = &grsec_enable_chroot_fchdir,
49663 + .maxlen = sizeof(int),
49664 + .mode = 0600,
49665 + .proc_handler = &proc_dointvec,
49666 + },
49667 +#endif
49668 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49669 + {
49670 + .procname = "chroot_deny_chroot",
49671 + .data = &grsec_enable_chroot_double,
49672 + .maxlen = sizeof(int),
49673 + .mode = 0600,
49674 + .proc_handler = &proc_dointvec,
49675 + },
49676 +#endif
49677 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49678 + {
49679 + .procname = "chroot_deny_pivot",
49680 + .data = &grsec_enable_chroot_pivot,
49681 + .maxlen = sizeof(int),
49682 + .mode = 0600,
49683 + .proc_handler = &proc_dointvec,
49684 + },
49685 +#endif
49686 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49687 + {
49688 + .procname = "chroot_enforce_chdir",
49689 + .data = &grsec_enable_chroot_chdir,
49690 + .maxlen = sizeof(int),
49691 + .mode = 0600,
49692 + .proc_handler = &proc_dointvec,
49693 + },
49694 +#endif
49695 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49696 + {
49697 + .procname = "chroot_deny_chmod",
49698 + .data = &grsec_enable_chroot_chmod,
49699 + .maxlen = sizeof(int),
49700 + .mode = 0600,
49701 + .proc_handler = &proc_dointvec,
49702 + },
49703 +#endif
49704 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49705 + {
49706 + .procname = "chroot_deny_mknod",
49707 + .data = &grsec_enable_chroot_mknod,
49708 + .maxlen = sizeof(int),
49709 + .mode = 0600,
49710 + .proc_handler = &proc_dointvec,
49711 + },
49712 +#endif
49713 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49714 + {
49715 + .procname = "chroot_restrict_nice",
49716 + .data = &grsec_enable_chroot_nice,
49717 + .maxlen = sizeof(int),
49718 + .mode = 0600,
49719 + .proc_handler = &proc_dointvec,
49720 + },
49721 +#endif
49722 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49723 + {
49724 + .procname = "chroot_execlog",
49725 + .data = &grsec_enable_chroot_execlog,
49726 + .maxlen = sizeof(int),
49727 + .mode = 0600,
49728 + .proc_handler = &proc_dointvec,
49729 + },
49730 +#endif
49731 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49732 + {
49733 + .procname = "chroot_caps",
49734 + .data = &grsec_enable_chroot_caps,
49735 + .maxlen = sizeof(int),
49736 + .mode = 0600,
49737 + .proc_handler = &proc_dointvec,
49738 + },
49739 +#endif
49740 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49741 + {
49742 + .procname = "chroot_deny_sysctl",
49743 + .data = &grsec_enable_chroot_sysctl,
49744 + .maxlen = sizeof(int),
49745 + .mode = 0600,
49746 + .proc_handler = &proc_dointvec,
49747 + },
49748 +#endif
49749 +#ifdef CONFIG_GRKERNSEC_TPE
49750 + {
49751 + .procname = "tpe",
49752 + .data = &grsec_enable_tpe,
49753 + .maxlen = sizeof(int),
49754 + .mode = 0600,
49755 + .proc_handler = &proc_dointvec,
49756 + },
49757 + {
49758 + .procname = "tpe_gid",
49759 + .data = &grsec_tpe_gid,
49760 + .maxlen = sizeof(int),
49761 + .mode = 0600,
49762 + .proc_handler = &proc_dointvec,
49763 + },
49764 +#endif
49765 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49766 + {
49767 + .procname = "tpe_invert",
49768 + .data = &grsec_enable_tpe_invert,
49769 + .maxlen = sizeof(int),
49770 + .mode = 0600,
49771 + .proc_handler = &proc_dointvec,
49772 + },
49773 +#endif
49774 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49775 + {
49776 + .procname = "tpe_restrict_all",
49777 + .data = &grsec_enable_tpe_all,
49778 + .maxlen = sizeof(int),
49779 + .mode = 0600,
49780 + .proc_handler = &proc_dointvec,
49781 + },
49782 +#endif
49783 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49784 + {
49785 + .procname = "socket_all",
49786 + .data = &grsec_enable_socket_all,
49787 + .maxlen = sizeof(int),
49788 + .mode = 0600,
49789 + .proc_handler = &proc_dointvec,
49790 + },
49791 + {
49792 + .procname = "socket_all_gid",
49793 + .data = &grsec_socket_all_gid,
49794 + .maxlen = sizeof(int),
49795 + .mode = 0600,
49796 + .proc_handler = &proc_dointvec,
49797 + },
49798 +#endif
49799 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49800 + {
49801 + .procname = "socket_client",
49802 + .data = &grsec_enable_socket_client,
49803 + .maxlen = sizeof(int),
49804 + .mode = 0600,
49805 + .proc_handler = &proc_dointvec,
49806 + },
49807 + {
49808 + .procname = "socket_client_gid",
49809 + .data = &grsec_socket_client_gid,
49810 + .maxlen = sizeof(int),
49811 + .mode = 0600,
49812 + .proc_handler = &proc_dointvec,
49813 + },
49814 +#endif
49815 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49816 + {
49817 + .procname = "socket_server",
49818 + .data = &grsec_enable_socket_server,
49819 + .maxlen = sizeof(int),
49820 + .mode = 0600,
49821 + .proc_handler = &proc_dointvec,
49822 + },
49823 + {
49824 + .procname = "socket_server_gid",
49825 + .data = &grsec_socket_server_gid,
49826 + .maxlen = sizeof(int),
49827 + .mode = 0600,
49828 + .proc_handler = &proc_dointvec,
49829 + },
49830 +#endif
49831 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49832 + {
49833 + .procname = "audit_group",
49834 + .data = &grsec_enable_group,
49835 + .maxlen = sizeof(int),
49836 + .mode = 0600,
49837 + .proc_handler = &proc_dointvec,
49838 + },
49839 + {
49840 + .procname = "audit_gid",
49841 + .data = &grsec_audit_gid,
49842 + .maxlen = sizeof(int),
49843 + .mode = 0600,
49844 + .proc_handler = &proc_dointvec,
49845 + },
49846 +#endif
49847 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49848 + {
49849 + .procname = "audit_chdir",
49850 + .data = &grsec_enable_chdir,
49851 + .maxlen = sizeof(int),
49852 + .mode = 0600,
49853 + .proc_handler = &proc_dointvec,
49854 + },
49855 +#endif
49856 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49857 + {
49858 + .procname = "audit_mount",
49859 + .data = &grsec_enable_mount,
49860 + .maxlen = sizeof(int),
49861 + .mode = 0600,
49862 + .proc_handler = &proc_dointvec,
49863 + },
49864 +#endif
49865 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49866 + {
49867 + .procname = "audit_textrel",
49868 + .data = &grsec_enable_audit_textrel,
49869 + .maxlen = sizeof(int),
49870 + .mode = 0600,
49871 + .proc_handler = &proc_dointvec,
49872 + },
49873 +#endif
49874 +#ifdef CONFIG_GRKERNSEC_DMESG
49875 + {
49876 + .procname = "dmesg",
49877 + .data = &grsec_enable_dmesg,
49878 + .maxlen = sizeof(int),
49879 + .mode = 0600,
49880 + .proc_handler = &proc_dointvec,
49881 + },
49882 +#endif
49883 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49884 + {
49885 + .procname = "chroot_findtask",
49886 + .data = &grsec_enable_chroot_findtask,
49887 + .maxlen = sizeof(int),
49888 + .mode = 0600,
49889 + .proc_handler = &proc_dointvec,
49890 + },
49891 +#endif
49892 +#ifdef CONFIG_GRKERNSEC_RESLOG
49893 + {
49894 + .procname = "resource_logging",
49895 + .data = &grsec_resource_logging,
49896 + .maxlen = sizeof(int),
49897 + .mode = 0600,
49898 + .proc_handler = &proc_dointvec,
49899 + },
49900 +#endif
49901 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49902 + {
49903 + .procname = "audit_ptrace",
49904 + .data = &grsec_enable_audit_ptrace,
49905 + .maxlen = sizeof(int),
49906 + .mode = 0600,
49907 + .proc_handler = &proc_dointvec,
49908 + },
49909 +#endif
49910 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49911 + {
49912 + .procname = "harden_ptrace",
49913 + .data = &grsec_enable_harden_ptrace,
49914 + .maxlen = sizeof(int),
49915 + .mode = 0600,
49916 + .proc_handler = &proc_dointvec,
49917 + },
49918 +#endif
49919 + {
49920 + .procname = "grsec_lock",
49921 + .data = &grsec_lock,
49922 + .maxlen = sizeof(int),
49923 + .mode = 0600,
49924 + .proc_handler = &proc_dointvec,
49925 + },
49926 +#endif
49927 +#ifdef CONFIG_GRKERNSEC_ROFS
49928 + {
49929 + .procname = "romount_protect",
49930 + .data = &grsec_enable_rofs,
49931 + .maxlen = sizeof(int),
49932 + .mode = 0600,
49933 + .proc_handler = &proc_dointvec_minmax,
49934 + .extra1 = &one,
49935 + .extra2 = &one,
49936 + },
49937 +#endif
49938 + { }
49939 +};
49940 +#endif
49941 diff -urNp linux-2.6.39.4/grsecurity/grsec_time.c linux-2.6.39.4/grsecurity/grsec_time.c
49942 --- linux-2.6.39.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49943 +++ linux-2.6.39.4/grsecurity/grsec_time.c 2011-08-05 19:44:37.000000000 -0400
49944 @@ -0,0 +1,16 @@
49945 +#include <linux/kernel.h>
49946 +#include <linux/sched.h>
49947 +#include <linux/grinternal.h>
49948 +#include <linux/module.h>
49949 +
49950 +void
49951 +gr_log_timechange(void)
49952 +{
49953 +#ifdef CONFIG_GRKERNSEC_TIME
49954 + if (grsec_enable_time)
49955 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49956 +#endif
49957 + return;
49958 +}
49959 +
49960 +EXPORT_SYMBOL(gr_log_timechange);
49961 diff -urNp linux-2.6.39.4/grsecurity/grsec_tpe.c linux-2.6.39.4/grsecurity/grsec_tpe.c
49962 --- linux-2.6.39.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49963 +++ linux-2.6.39.4/grsecurity/grsec_tpe.c 2011-08-05 19:44:37.000000000 -0400
49964 @@ -0,0 +1,39 @@
49965 +#include <linux/kernel.h>
49966 +#include <linux/sched.h>
49967 +#include <linux/file.h>
49968 +#include <linux/fs.h>
49969 +#include <linux/grinternal.h>
49970 +
49971 +extern int gr_acl_tpe_check(void);
49972 +
49973 +int
49974 +gr_tpe_allow(const struct file *file)
49975 +{
49976 +#ifdef CONFIG_GRKERNSEC
49977 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49978 + const struct cred *cred = current_cred();
49979 +
49980 + if (cred->uid && ((grsec_enable_tpe &&
49981 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49982 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49983 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49984 +#else
49985 + in_group_p(grsec_tpe_gid)
49986 +#endif
49987 + ) || gr_acl_tpe_check()) &&
49988 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49989 + (inode->i_mode & S_IWOTH))))) {
49990 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49991 + return 0;
49992 + }
49993 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49994 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49995 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49996 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49997 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49998 + return 0;
49999 + }
50000 +#endif
50001 +#endif
50002 + return 1;
50003 +}
50004 diff -urNp linux-2.6.39.4/grsecurity/grsum.c linux-2.6.39.4/grsecurity/grsum.c
50005 --- linux-2.6.39.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
50006 +++ linux-2.6.39.4/grsecurity/grsum.c 2011-08-05 19:44:37.000000000 -0400
50007 @@ -0,0 +1,61 @@
50008 +#include <linux/err.h>
50009 +#include <linux/kernel.h>
50010 +#include <linux/sched.h>
50011 +#include <linux/mm.h>
50012 +#include <linux/scatterlist.h>
50013 +#include <linux/crypto.h>
50014 +#include <linux/gracl.h>
50015 +
50016 +
50017 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
50018 +#error "crypto and sha256 must be built into the kernel"
50019 +#endif
50020 +
50021 +int
50022 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
50023 +{
50024 + char *p;
50025 + struct crypto_hash *tfm;
50026 + struct hash_desc desc;
50027 + struct scatterlist sg;
50028 + unsigned char temp_sum[GR_SHA_LEN];
50029 + volatile int retval = 0;
50030 + volatile int dummy = 0;
50031 + unsigned int i;
50032 +
50033 + sg_init_table(&sg, 1);
50034 +
50035 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
50036 + if (IS_ERR(tfm)) {
50037 + /* should never happen, since sha256 should be built in */
50038 + return 1;
50039 + }
50040 +
50041 + desc.tfm = tfm;
50042 + desc.flags = 0;
50043 +
50044 + crypto_hash_init(&desc);
50045 +
50046 + p = salt;
50047 + sg_set_buf(&sg, p, GR_SALT_LEN);
50048 + crypto_hash_update(&desc, &sg, sg.length);
50049 +
50050 + p = entry->pw;
50051 + sg_set_buf(&sg, p, strlen(p));
50052 +
50053 + crypto_hash_update(&desc, &sg, sg.length);
50054 +
50055 + crypto_hash_final(&desc, temp_sum);
50056 +
50057 + memset(entry->pw, 0, GR_PW_LEN);
50058 +
50059 + for (i = 0; i < GR_SHA_LEN; i++)
50060 + if (sum[i] != temp_sum[i])
50061 + retval = 1;
50062 + else
50063 + dummy = 1; // waste a cycle
50064 +
50065 + crypto_free_hash(tfm);
50066 +
50067 + return retval;
50068 +}
50069 diff -urNp linux-2.6.39.4/grsecurity/Kconfig linux-2.6.39.4/grsecurity/Kconfig
50070 --- linux-2.6.39.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
50071 +++ linux-2.6.39.4/grsecurity/Kconfig 2011-08-17 19:04:52.000000000 -0400
50072 @@ -0,0 +1,1050 @@
50073 +#
50074 +# grecurity configuration
50075 +#
50076 +
50077 +menu "Grsecurity"
50078 +
50079 +config GRKERNSEC
50080 + bool "Grsecurity"
50081 + select CRYPTO
50082 + select CRYPTO_SHA256
50083 + help
50084 + If you say Y here, you will be able to configure many features
50085 + that will enhance the security of your system. It is highly
50086 + recommended that you say Y here and read through the help
50087 + for each option so that you fully understand the features and
50088 + can evaluate their usefulness for your machine.
50089 +
50090 +choice
50091 + prompt "Security Level"
50092 + depends on GRKERNSEC
50093 + default GRKERNSEC_CUSTOM
50094 +
50095 +config GRKERNSEC_LOW
50096 + bool "Low"
50097 + select GRKERNSEC_LINK
50098 + select GRKERNSEC_FIFO
50099 + select GRKERNSEC_EXECVE
50100 + select GRKERNSEC_RANDNET
50101 + select GRKERNSEC_DMESG
50102 + select GRKERNSEC_CHROOT
50103 + select GRKERNSEC_CHROOT_CHDIR
50104 +
50105 + help
50106 + If you choose this option, several of the grsecurity options will
50107 + be enabled that will give you greater protection against a number
50108 + of attacks, while assuring that none of your software will have any
50109 + conflicts with the additional security measures. If you run a lot
50110 + of unusual software, or you are having problems with the higher
50111 + security levels, you should say Y here. With this option, the
50112 + following features are enabled:
50113 +
50114 + - Linking restrictions
50115 + - FIFO restrictions
50116 + - Enforcing RLIMIT_NPROC on execve
50117 + - Restricted dmesg
50118 + - Enforced chdir("/") on chroot
50119 + - Runtime module disabling
50120 +
50121 +config GRKERNSEC_MEDIUM
50122 + bool "Medium"
50123 + select PAX
50124 + select PAX_EI_PAX
50125 + select PAX_PT_PAX_FLAGS
50126 + select PAX_HAVE_ACL_FLAGS
50127 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50128 + select GRKERNSEC_CHROOT
50129 + select GRKERNSEC_CHROOT_SYSCTL
50130 + select GRKERNSEC_LINK
50131 + select GRKERNSEC_FIFO
50132 + select GRKERNSEC_EXECVE
50133 + select GRKERNSEC_DMESG
50134 + select GRKERNSEC_RANDNET
50135 + select GRKERNSEC_FORKFAIL
50136 + select GRKERNSEC_TIME
50137 + select GRKERNSEC_SIGNAL
50138 + select GRKERNSEC_CHROOT
50139 + select GRKERNSEC_CHROOT_UNIX
50140 + select GRKERNSEC_CHROOT_MOUNT
50141 + select GRKERNSEC_CHROOT_PIVOT
50142 + select GRKERNSEC_CHROOT_DOUBLE
50143 + select GRKERNSEC_CHROOT_CHDIR
50144 + select GRKERNSEC_CHROOT_MKNOD
50145 + select GRKERNSEC_PROC
50146 + select GRKERNSEC_PROC_USERGROUP
50147 + select PAX_RANDUSTACK
50148 + select PAX_ASLR
50149 + select PAX_RANDMMAP
50150 + select PAX_REFCOUNT if (X86 || SPARC64)
50151 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
50152 +
50153 + help
50154 + If you say Y here, several features in addition to those included
50155 + in the low additional security level will be enabled. These
50156 + features provide even more security to your system, though in rare
50157 + cases they may be incompatible with very old or poorly written
50158 + software. If you enable this option, make sure that your auth
50159 + service (identd) is running as gid 1001. With this option,
50160 + the following features (in addition to those provided in the
50161 + low additional security level) will be enabled:
50162 +
50163 + - Failed fork logging
50164 + - Time change logging
50165 + - Signal logging
50166 + - Deny mounts in chroot
50167 + - Deny double chrooting
50168 + - Deny sysctl writes in chroot
50169 + - Deny mknod in chroot
50170 + - Deny access to abstract AF_UNIX sockets out of chroot
50171 + - Deny pivot_root in chroot
50172 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
50173 + - /proc restrictions with special GID set to 10 (usually wheel)
50174 + - Address Space Layout Randomization (ASLR)
50175 + - Prevent exploitation of most refcount overflows
50176 + - Bounds checking of copying between the kernel and userland
50177 +
50178 +config GRKERNSEC_HIGH
50179 + bool "High"
50180 + select GRKERNSEC_LINK
50181 + select GRKERNSEC_FIFO
50182 + select GRKERNSEC_EXECVE
50183 + select GRKERNSEC_DMESG
50184 + select GRKERNSEC_FORKFAIL
50185 + select GRKERNSEC_TIME
50186 + select GRKERNSEC_SIGNAL
50187 + select GRKERNSEC_CHROOT
50188 + select GRKERNSEC_CHROOT_SHMAT
50189 + select GRKERNSEC_CHROOT_UNIX
50190 + select GRKERNSEC_CHROOT_MOUNT
50191 + select GRKERNSEC_CHROOT_FCHDIR
50192 + select GRKERNSEC_CHROOT_PIVOT
50193 + select GRKERNSEC_CHROOT_DOUBLE
50194 + select GRKERNSEC_CHROOT_CHDIR
50195 + select GRKERNSEC_CHROOT_MKNOD
50196 + select GRKERNSEC_CHROOT_CAPS
50197 + select GRKERNSEC_CHROOT_SYSCTL
50198 + select GRKERNSEC_CHROOT_FINDTASK
50199 + select GRKERNSEC_SYSFS_RESTRICT
50200 + select GRKERNSEC_PROC
50201 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50202 + select GRKERNSEC_HIDESYM
50203 + select GRKERNSEC_BRUTE
50204 + select GRKERNSEC_PROC_USERGROUP
50205 + select GRKERNSEC_KMEM
50206 + select GRKERNSEC_RESLOG
50207 + select GRKERNSEC_RANDNET
50208 + select GRKERNSEC_PROC_ADD
50209 + select GRKERNSEC_CHROOT_CHMOD
50210 + select GRKERNSEC_CHROOT_NICE
50211 + select GRKERNSEC_AUDIT_MOUNT
50212 + select GRKERNSEC_MODHARDEN if (MODULES)
50213 + select GRKERNSEC_HARDEN_PTRACE
50214 + select GRKERNSEC_VM86 if (X86_32)
50215 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50216 + select PAX
50217 + select PAX_RANDUSTACK
50218 + select PAX_ASLR
50219 + select PAX_RANDMMAP
50220 + select PAX_NOEXEC
50221 + select PAX_MPROTECT
50222 + select PAX_EI_PAX
50223 + select PAX_PT_PAX_FLAGS
50224 + select PAX_HAVE_ACL_FLAGS
50225 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50226 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
50227 + select PAX_RANDKSTACK if (X86_TSC && X86)
50228 + select PAX_SEGMEXEC if (X86_32)
50229 + select PAX_PAGEEXEC
50230 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50231 + select PAX_EMUTRAMP if (PARISC)
50232 + select PAX_EMUSIGRT if (PARISC)
50233 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50234 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50235 + select PAX_REFCOUNT if (X86 || SPARC64)
50236 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50237 + help
50238 + If you say Y here, many of the features of grsecurity will be
50239 + enabled, which will protect you against many kinds of attacks
50240 + against your system. The heightened security comes at a cost
50241 + of an increased chance of incompatibilities with rare software
50242 + on your machine. Since this security level enables PaX, you should
50243 + view <http://pax.grsecurity.net> and read about the PaX
50244 + project. While you are there, download chpax and run it on
50245 + binaries that cause problems with PaX. Also remember that
50246 + since the /proc restrictions are enabled, you must run your
50247 + identd as gid 1001. This security level enables the following
50248 + features in addition to those listed in the low and medium
50249 + security levels:
50250 +
50251 + - Additional /proc restrictions
50252 + - Chmod restrictions in chroot
50253 + - No signals, ptrace, or viewing of processes outside of chroot
50254 + - Capability restrictions in chroot
50255 + - Deny fchdir out of chroot
50256 + - Priority restrictions in chroot
50257 + - Segmentation-based implementation of PaX
50258 + - Mprotect restrictions
50259 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50260 + - Kernel stack randomization
50261 + - Mount/unmount/remount logging
50262 + - Kernel symbol hiding
50263 + - Prevention of memory exhaustion-based exploits
50264 + - Hardening of module auto-loading
50265 + - Ptrace restrictions
50266 + - Restricted vm86 mode
50267 + - Restricted sysfs/debugfs
50268 + - Active kernel exploit response
50269 +
50270 +config GRKERNSEC_CUSTOM
50271 + bool "Custom"
50272 + help
50273 + If you say Y here, you will be able to configure every grsecurity
50274 + option, which allows you to enable many more features that aren't
50275 + covered in the basic security levels. These additional features
50276 + include TPE, socket restrictions, and the sysctl system for
50277 + grsecurity. It is advised that you read through the help for
50278 + each option to determine its usefulness in your situation.
50279 +
50280 +endchoice
50281 +
50282 +menu "Address Space Protection"
50283 +depends on GRKERNSEC
50284 +
50285 +config GRKERNSEC_KMEM
50286 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50287 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50288 + help
50289 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50290 + be written to via mmap or otherwise to modify the running kernel.
50291 + /dev/port will also not be allowed to be opened. If you have module
50292 + support disabled, enabling this will close up four ways that are
50293 + currently used to insert malicious code into the running kernel.
50294 + Even with all these features enabled, we still highly recommend that
50295 + you use the RBAC system, as it is still possible for an attacker to
50296 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50297 + If you are not using XFree86, you may be able to stop this additional
50298 + case by enabling the 'Disable privileged I/O' option. Though nothing
50299 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50300 + but only to video memory, which is the only writing we allow in this
50301 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50302 + not be allowed to mprotect it with PROT_WRITE later.
50303 + It is highly recommended that you say Y here if you meet all the
50304 + conditions above.
50305 +
50306 +config GRKERNSEC_VM86
50307 + bool "Restrict VM86 mode"
50308 + depends on X86_32
50309 +
50310 + help
50311 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50312 + make use of a special execution mode on 32bit x86 processors called
50313 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50314 + video cards and will still work with this option enabled. The purpose
50315 + of the option is to prevent exploitation of emulation errors in
50316 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50317 + Nearly all users should be able to enable this option.
50318 +
50319 +config GRKERNSEC_IO
50320 + bool "Disable privileged I/O"
50321 + depends on X86
50322 + select RTC_CLASS
50323 + select RTC_INTF_DEV
50324 + select RTC_DRV_CMOS
50325 +
50326 + help
50327 + If you say Y here, all ioperm and iopl calls will return an error.
50328 + Ioperm and iopl can be used to modify the running kernel.
50329 + Unfortunately, some programs need this access to operate properly,
50330 + the most notable of which are XFree86 and hwclock. hwclock can be
50331 + remedied by having RTC support in the kernel, so real-time
50332 + clock support is enabled if this option is enabled, to ensure
50333 + that hwclock operates correctly. XFree86 still will not
50334 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50335 + IF YOU USE XFree86. If you use XFree86 and you still want to
50336 + protect your kernel against modification, use the RBAC system.
50337 +
50338 +config GRKERNSEC_PROC_MEMMAP
50339 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50340 + default y if (PAX_NOEXEC || PAX_ASLR)
50341 + depends on PAX_NOEXEC || PAX_ASLR
50342 + help
50343 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50344 + give no information about the addresses of its mappings if
50345 + PaX features that rely on random addresses are enabled on the task.
50346 + If you use PaX it is greatly recommended that you say Y here as it
50347 + closes up a hole that makes the full ASLR useless for suid
50348 + binaries.
50349 +
50350 +config GRKERNSEC_BRUTE
50351 + bool "Deter exploit bruteforcing"
50352 + help
50353 + If you say Y here, attempts to bruteforce exploits against forking
50354 + daemons such as apache or sshd, as well as against suid/sgid binaries
50355 + will be deterred. When a child of a forking daemon is killed by PaX
50356 + or crashes due to an illegal instruction or other suspicious signal,
50357 + the parent process will be delayed 30 seconds upon every subsequent
50358 + fork until the administrator is able to assess the situation and
50359 + restart the daemon.
50360 + In the suid/sgid case, the attempt is logged, the user has all their
50361 + processes terminated, and they are prevented from executing any further
50362 + processes for 15 minutes.
50363 + It is recommended that you also enable signal logging in the auditing
50364 + section so that logs are generated when a process triggers a suspicious
50365 + signal.
50366 + If the sysctl option is enabled, a sysctl option with name
50367 + "deter_bruteforce" is created.
50368 +
50369 +
50370 +config GRKERNSEC_MODHARDEN
50371 + bool "Harden module auto-loading"
50372 + depends on MODULES
50373 + help
50374 + If you say Y here, module auto-loading in response to use of some
50375 + feature implemented by an unloaded module will be restricted to
50376 + root users. Enabling this option helps defend against attacks
50377 + by unprivileged users who abuse the auto-loading behavior to
50378 + cause a vulnerable module to load that is then exploited.
50379 +
50380 + If this option prevents a legitimate use of auto-loading for a
50381 + non-root user, the administrator can execute modprobe manually
50382 + with the exact name of the module mentioned in the alert log.
50383 + Alternatively, the administrator can add the module to the list
50384 + of modules loaded at boot by modifying init scripts.
50385 +
50386 + Modification of init scripts will most likely be needed on
50387 + Ubuntu servers with encrypted home directory support enabled,
50388 + as the first non-root user logging in will cause the ecb(aes),
50389 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50390 +
50391 +config GRKERNSEC_HIDESYM
50392 + bool "Hide kernel symbols"
50393 + help
50394 + If you say Y here, getting information on loaded modules, and
50395 + displaying all kernel symbols through a syscall will be restricted
50396 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50397 + /proc/kallsyms will be restricted to the root user. The RBAC
50398 + system can hide that entry even from root.
50399 +
50400 + This option also prevents leaking of kernel addresses through
50401 + several /proc entries.
50402 +
50403 + Note that this option is only effective provided the following
50404 + conditions are met:
50405 + 1) The kernel using grsecurity is not precompiled by some distribution
50406 + 2) You have also enabled GRKERNSEC_DMESG
50407 + 3) You are using the RBAC system and hiding other files such as your
50408 + kernel image and System.map. Alternatively, enabling this option
50409 + causes the permissions on /boot, /lib/modules, and the kernel
50410 + source directory to change at compile time to prevent
50411 + reading by non-root users.
50412 + If the above conditions are met, this option will aid in providing a
50413 + useful protection against local kernel exploitation of overflows
50414 + and arbitrary read/write vulnerabilities.
50415 +
50416 +config GRKERNSEC_KERN_LOCKOUT
50417 + bool "Active kernel exploit response"
50418 + depends on X86 || ARM || PPC || SPARC
50419 + help
50420 + If you say Y here, when a PaX alert is triggered due to suspicious
50421 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50422 + or an OOPs occurs due to bad memory accesses, instead of just
50423 + terminating the offending process (and potentially allowing
50424 + a subsequent exploit from the same user), we will take one of two
50425 + actions:
50426 + If the user was root, we will panic the system
50427 + If the user was non-root, we will log the attempt, terminate
50428 + all processes owned by the user, then prevent them from creating
50429 + any new processes until the system is restarted
50430 + This deters repeated kernel exploitation/bruteforcing attempts
50431 + and is useful for later forensics.
50432 +
50433 +endmenu
50434 +menu "Role Based Access Control Options"
50435 +depends on GRKERNSEC
50436 +
50437 +config GRKERNSEC_RBAC_DEBUG
50438 + bool
50439 +
50440 +config GRKERNSEC_NO_RBAC
50441 + bool "Disable RBAC system"
50442 + help
50443 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50444 + preventing the RBAC system from being enabled. You should only say Y
50445 + here if you have no intention of using the RBAC system, so as to prevent
50446 + an attacker with root access from misusing the RBAC system to hide files
50447 + and processes when loadable module support and /dev/[k]mem have been
50448 + locked down.
50449 +
50450 +config GRKERNSEC_ACL_HIDEKERN
50451 + bool "Hide kernel processes"
50452 + help
50453 + If you say Y here, all kernel threads will be hidden to all
50454 + processes but those whose subject has the "view hidden processes"
50455 + flag.
50456 +
50457 +config GRKERNSEC_ACL_MAXTRIES
50458 + int "Maximum tries before password lockout"
50459 + default 3
50460 + help
50461 + This option enforces the maximum number of times a user can attempt
50462 + to authorize themselves with the grsecurity RBAC system before being
50463 + denied the ability to attempt authorization again for a specified time.
50464 + The lower the number, the harder it will be to brute-force a password.
50465 +
50466 +config GRKERNSEC_ACL_TIMEOUT
50467 + int "Time to wait after max password tries, in seconds"
50468 + default 30
50469 + help
50470 + This option specifies the time the user must wait after attempting to
50471 + authorize to the RBAC system with the maximum number of invalid
50472 + passwords. The higher the number, the harder it will be to brute-force
50473 + a password.
50474 +
50475 +endmenu
50476 +menu "Filesystem Protections"
50477 +depends on GRKERNSEC
50478 +
50479 +config GRKERNSEC_PROC
50480 + bool "Proc restrictions"
50481 + help
50482 + If you say Y here, the permissions of the /proc filesystem
50483 + will be altered to enhance system security and privacy. You MUST
50484 + choose either a user only restriction or a user and group restriction.
50485 + Depending upon the option you choose, you can either restrict users to
50486 + see only the processes they themselves run, or choose a group that can
50487 + view all processes and files normally restricted to root if you choose
50488 + the "restrict to user only" option. NOTE: If you're running identd as
50489 + a non-root user, you will have to run it as the group you specify here.
50490 +
50491 +config GRKERNSEC_PROC_USER
50492 + bool "Restrict /proc to user only"
50493 + depends on GRKERNSEC_PROC
50494 + help
50495 + If you say Y here, non-root users will only be able to view their own
50496 + processes, and restricts them from viewing network-related information,
50497 + and viewing kernel symbol and module information.
50498 +
50499 +config GRKERNSEC_PROC_USERGROUP
50500 + bool "Allow special group"
50501 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50502 + help
50503 + If you say Y here, you will be able to select a group that will be
50504 + able to view all processes and network-related information. If you've
50505 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50506 + remain hidden. This option is useful if you want to run identd as
50507 + a non-root user.
50508 +
50509 +config GRKERNSEC_PROC_GID
50510 + int "GID for special group"
50511 + depends on GRKERNSEC_PROC_USERGROUP
50512 + default 1001
50513 +
50514 +config GRKERNSEC_PROC_ADD
50515 + bool "Additional restrictions"
50516 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50517 + help
50518 + If you say Y here, additional restrictions will be placed on
50519 + /proc that keep normal users from viewing device information and
50520 + slabinfo information that could be useful for exploits.
50521 +
50522 +config GRKERNSEC_LINK
50523 + bool "Linking restrictions"
50524 + help
50525 + If you say Y here, /tmp race exploits will be prevented, since users
50526 + will no longer be able to follow symlinks owned by other users in
50527 + world-writable +t directories (e.g. /tmp), unless the owner of the
50528 + symlink is the owner of the directory. users will also not be
50529 + able to hardlink to files they do not own. If the sysctl option is
50530 + enabled, a sysctl option with name "linking_restrictions" is created.
50531 +
50532 +config GRKERNSEC_FIFO
50533 + bool "FIFO restrictions"
50534 + help
50535 + If you say Y here, users will not be able to write to FIFOs they don't
50536 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50537 + the FIFO is the same owner of the directory it's held in. If the sysctl
50538 + option is enabled, a sysctl option with name "fifo_restrictions" is
50539 + created.
50540 +
50541 +config GRKERNSEC_SYSFS_RESTRICT
50542 + bool "Sysfs/debugfs restriction"
50543 + depends on SYSFS
50544 + help
50545 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50546 + any filesystem normally mounted under it (e.g. debugfs) will only
50547 + be accessible by root. These filesystems generally provide access
50548 + to hardware and debug information that isn't appropriate for unprivileged
50549 + users of the system. Sysfs and debugfs have also become a large source
50550 + of new vulnerabilities, ranging from infoleaks to local compromise.
50551 + There has been very little oversight with an eye toward security involved
50552 + in adding new exporters of information to these filesystems, so their
50553 + use is discouraged.
50554 + This option is equivalent to a chmod 0700 of the mount paths.
50555 +
50556 +config GRKERNSEC_ROFS
50557 + bool "Runtime read-only mount protection"
50558 + help
50559 + If you say Y here, a sysctl option with name "romount_protect" will
50560 + be created. By setting this option to 1 at runtime, filesystems
50561 + will be protected in the following ways:
50562 + * No new writable mounts will be allowed
50563 + * Existing read-only mounts won't be able to be remounted read/write
50564 + * Write operations will be denied on all block devices
50565 + This option acts independently of grsec_lock: once it is set to 1,
50566 + it cannot be turned off. Therefore, please be mindful of the resulting
50567 + behavior if this option is enabled in an init script on a read-only
50568 + filesystem. This feature is mainly intended for secure embedded systems.
50569 +
50570 +config GRKERNSEC_CHROOT
50571 + bool "Chroot jail restrictions"
50572 + help
50573 + If you say Y here, you will be able to choose several options that will
50574 + make breaking out of a chrooted jail much more difficult. If you
50575 + encounter no software incompatibilities with the following options, it
50576 + is recommended that you enable each one.
50577 +
50578 +config GRKERNSEC_CHROOT_MOUNT
50579 + bool "Deny mounts"
50580 + depends on GRKERNSEC_CHROOT
50581 + help
50582 + If you say Y here, processes inside a chroot will not be able to
50583 + mount or remount filesystems. If the sysctl option is enabled, a
50584 + sysctl option with name "chroot_deny_mount" is created.
50585 +
50586 +config GRKERNSEC_CHROOT_DOUBLE
50587 + bool "Deny double-chroots"
50588 + depends on GRKERNSEC_CHROOT
50589 + help
50590 + If you say Y here, processes inside a chroot will not be able to chroot
50591 + again outside the chroot. This is a widely used method of breaking
50592 + out of a chroot jail and should not be allowed. If the sysctl
50593 + option is enabled, a sysctl option with name
50594 + "chroot_deny_chroot" is created.
50595 +
50596 +config GRKERNSEC_CHROOT_PIVOT
50597 + bool "Deny pivot_root in chroot"
50598 + depends on GRKERNSEC_CHROOT
50599 + help
50600 + If you say Y here, processes inside a chroot will not be able to use
50601 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50602 + works similar to chroot in that it changes the root filesystem. This
50603 + function could be misused in a chrooted process to attempt to break out
50604 + of the chroot, and therefore should not be allowed. If the sysctl
50605 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50606 + created.
50607 +
50608 +config GRKERNSEC_CHROOT_CHDIR
50609 + bool "Enforce chdir(\"/\") on all chroots"
50610 + depends on GRKERNSEC_CHROOT
50611 + help
50612 + If you say Y here, the current working directory of all newly-chrooted
50613 + applications will be set to the root directory of the chroot.
50614 + The man page on chroot(2) states:
50615 + Note that this call does not change the current working
50616 + directory, so that `.' can be outside the tree rooted at
50617 + `/'. In particular, the super-user can escape from a
50618 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50619 +
50620 + It is recommended that you say Y here, since it's not known to break
50621 + any software. If the sysctl option is enabled, a sysctl option with
50622 + name "chroot_enforce_chdir" is created.
50623 +
50624 +config GRKERNSEC_CHROOT_CHMOD
50625 + bool "Deny (f)chmod +s"
50626 + depends on GRKERNSEC_CHROOT
50627 + help
50628 + If you say Y here, processes inside a chroot will not be able to chmod
50629 + or fchmod files to make them have suid or sgid bits. This protects
50630 + against another published method of breaking a chroot. If the sysctl
50631 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50632 + created.
50633 +
50634 +config GRKERNSEC_CHROOT_FCHDIR
50635 + bool "Deny fchdir out of chroot"
50636 + depends on GRKERNSEC_CHROOT
50637 + help
50638 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50639 + to a file descriptor of the chrooting process that points to a directory
50640 + outside the filesystem will be stopped. If the sysctl option
50641 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50642 +
50643 +config GRKERNSEC_CHROOT_MKNOD
50644 + bool "Deny mknod"
50645 + depends on GRKERNSEC_CHROOT
50646 + help
50647 + If you say Y here, processes inside a chroot will not be allowed to
50648 + mknod. The problem with using mknod inside a chroot is that it
50649 + would allow an attacker to create a device entry that is the same
50650 + as one on the physical root of your system, which could range from
50651 + anything from the console device to a device for your harddrive (which
50652 + they could then use to wipe the drive or steal data). It is recommended
50653 + that you say Y here, unless you run into software incompatibilities.
50654 + If the sysctl option is enabled, a sysctl option with name
50655 + "chroot_deny_mknod" is created.
50656 +
50657 +config GRKERNSEC_CHROOT_SHMAT
50658 + bool "Deny shmat() out of chroot"
50659 + depends on GRKERNSEC_CHROOT
50660 + help
50661 + If you say Y here, processes inside a chroot will not be able to attach
50662 + to shared memory segments that were created outside of the chroot jail.
50663 + It is recommended that you say Y here. If the sysctl option is enabled,
50664 + a sysctl option with name "chroot_deny_shmat" is created.
50665 +
50666 +config GRKERNSEC_CHROOT_UNIX
50667 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50668 + depends on GRKERNSEC_CHROOT
50669 + help
50670 + If you say Y here, processes inside a chroot will not be able to
50671 + connect to abstract (meaning not belonging to a filesystem) Unix
50672 + domain sockets that were bound outside of a chroot. It is recommended
50673 + that you say Y here. If the sysctl option is enabled, a sysctl option
50674 + with name "chroot_deny_unix" is created.
50675 +
50676 +config GRKERNSEC_CHROOT_FINDTASK
50677 + bool "Protect outside processes"
50678 + depends on GRKERNSEC_CHROOT
50679 + help
50680 + If you say Y here, processes inside a chroot will not be able to
50681 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50682 + getsid, or view any process outside of the chroot. If the sysctl
50683 + option is enabled, a sysctl option with name "chroot_findtask" is
50684 + created.
50685 +
50686 +config GRKERNSEC_CHROOT_NICE
50687 + bool "Restrict priority changes"
50688 + depends on GRKERNSEC_CHROOT
50689 + help
50690 + If you say Y here, processes inside a chroot will not be able to raise
50691 + the priority of processes in the chroot, or alter the priority of
50692 + processes outside the chroot. This provides more security than simply
50693 + removing CAP_SYS_NICE from the process' capability set. If the
50694 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50695 + is created.
50696 +
50697 +config GRKERNSEC_CHROOT_SYSCTL
50698 + bool "Deny sysctl writes"
50699 + depends on GRKERNSEC_CHROOT
50700 + help
50701 + If you say Y here, an attacker in a chroot will not be able to
50702 + write to sysctl entries, either by sysctl(2) or through a /proc
50703 + interface. It is strongly recommended that you say Y here. If the
50704 + sysctl option is enabled, a sysctl option with name
50705 + "chroot_deny_sysctl" is created.
50706 +
50707 +config GRKERNSEC_CHROOT_CAPS
50708 + bool "Capability restrictions"
50709 + depends on GRKERNSEC_CHROOT
50710 + help
50711 + If you say Y here, the capabilities on all root processes within a
50712 + chroot jail will be lowered to stop module insertion, raw i/o,
50713 + system and net admin tasks, rebooting the system, modifying immutable
50714 + files, modifying IPC owned by another, and changing the system time.
50715 + This is left an option because it can break some apps. Disable this
50716 + if your chrooted apps are having problems performing those kinds of
50717 + tasks. If the sysctl option is enabled, a sysctl option with
50718 + name "chroot_caps" is created.
50719 +
50720 +endmenu
50721 +menu "Kernel Auditing"
50722 +depends on GRKERNSEC
50723 +
50724 +config GRKERNSEC_AUDIT_GROUP
50725 + bool "Single group for auditing"
50726 + help
50727 + If you say Y here, the exec, chdir, and (un)mount logging features
50728 + will only operate on a group you specify. This option is recommended
50729 + if you only want to watch certain users instead of having a large
50730 + amount of logs from the entire system. If the sysctl option is enabled,
50731 + a sysctl option with name "audit_group" is created.
50732 +
50733 +config GRKERNSEC_AUDIT_GID
50734 + int "GID for auditing"
50735 + depends on GRKERNSEC_AUDIT_GROUP
50736 + default 1007
50737 +
50738 +config GRKERNSEC_EXECLOG
50739 + bool "Exec logging"
50740 + help
50741 + If you say Y here, all execve() calls will be logged (since the
50742 + other exec*() calls are frontends to execve(), all execution
50743 + will be logged). Useful for shell-servers that like to keep track
50744 + of their users. If the sysctl option is enabled, a sysctl option with
50745 + name "exec_logging" is created.
50746 + WARNING: This option when enabled will produce a LOT of logs, especially
50747 + on an active system.
50748 +
50749 +config GRKERNSEC_RESLOG
50750 + bool "Resource logging"
50751 + help
50752 + If you say Y here, all attempts to overstep resource limits will
50753 + be logged with the resource name, the requested size, and the current
50754 + limit. It is highly recommended that you say Y here. If the sysctl
50755 + option is enabled, a sysctl option with name "resource_logging" is
50756 + created. If the RBAC system is enabled, the sysctl value is ignored.
50757 +
50758 +config GRKERNSEC_CHROOT_EXECLOG
50759 + bool "Log execs within chroot"
50760 + help
50761 + If you say Y here, all executions inside a chroot jail will be logged
50762 + to syslog. This can cause a large amount of logs if certain
50763 + applications (eg. djb's daemontools) are installed on the system, and
50764 + is therefore left as an option. If the sysctl option is enabled, a
50765 + sysctl option with name "chroot_execlog" is created.
50766 +
50767 +config GRKERNSEC_AUDIT_PTRACE
50768 + bool "Ptrace logging"
50769 + help
50770 + If you say Y here, all attempts to attach to a process via ptrace
50771 + will be logged. If the sysctl option is enabled, a sysctl option
50772 + with name "audit_ptrace" is created.
50773 +
50774 +config GRKERNSEC_AUDIT_CHDIR
50775 + bool "Chdir logging"
50776 + help
50777 + If you say Y here, all chdir() calls will be logged. If the sysctl
50778 + option is enabled, a sysctl option with name "audit_chdir" is created.
50779 +
50780 +config GRKERNSEC_AUDIT_MOUNT
50781 + bool "(Un)Mount logging"
50782 + help
50783 + If you say Y here, all mounts and unmounts will be logged. If the
50784 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50785 + created.
50786 +
50787 +config GRKERNSEC_SIGNAL
50788 + bool "Signal logging"
50789 + help
50790 + If you say Y here, certain important signals will be logged, such as
50791 + SIGSEGV, which will as a result inform you of when an error in a program
50792 + occurred, which in some cases could mean a possible exploit attempt.
50793 + If the sysctl option is enabled, a sysctl option with name
50794 + "signal_logging" is created.
50795 +
50796 +config GRKERNSEC_FORKFAIL
50797 + bool "Fork failure logging"
50798 + help
50799 + If you say Y here, all failed fork() attempts will be logged.
50800 + This could suggest a fork bomb, or someone attempting to overstep
50801 + their process limit. If the sysctl option is enabled, a sysctl option
50802 + with name "forkfail_logging" is created.
50803 +
50804 +config GRKERNSEC_TIME
50805 + bool "Time change logging"
50806 + help
50807 + If you say Y here, any changes of the system clock will be logged.
50808 + If the sysctl option is enabled, a sysctl option with name
50809 + "timechange_logging" is created.
50810 +
50811 +config GRKERNSEC_PROC_IPADDR
50812 + bool "/proc/<pid>/ipaddr support"
50813 + help
50814 + If you say Y here, a new entry will be added to each /proc/<pid>
50815 + directory that contains the IP address of the person using the task.
50816 + The IP is carried across local TCP and AF_UNIX stream sockets.
50817 + This information can be useful for IDS/IPSes to perform remote response
50818 + to a local attack. The entry is readable by only the owner of the
50819 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50820 + the RBAC system), and thus does not create privacy concerns.
50821 +
50822 +config GRKERNSEC_RWXMAP_LOG
50823 + bool 'Denied RWX mmap/mprotect logging'
50824 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50825 + help
50826 + If you say Y here, calls to mmap() and mprotect() with explicit
50827 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50828 + denied by the PAX_MPROTECT feature. If the sysctl option is
50829 + enabled, a sysctl option with name "rwxmap_logging" is created.
50830 +
50831 +config GRKERNSEC_AUDIT_TEXTREL
50832 + bool 'ELF text relocations logging (READ HELP)'
50833 + depends on PAX_MPROTECT
50834 + help
50835 + If you say Y here, text relocations will be logged with the filename
50836 + of the offending library or binary. The purpose of the feature is
50837 + to help Linux distribution developers get rid of libraries and
50838 + binaries that need text relocations which hinder the future progress
50839 + of PaX. Only Linux distribution developers should say Y here, and
50840 + never on a production machine, as this option creates an information
50841 + leak that could aid an attacker in defeating the randomization of
50842 + a single memory region. If the sysctl option is enabled, a sysctl
50843 + option with name "audit_textrel" is created.
50844 +
50845 +endmenu
50846 +
50847 +menu "Executable Protections"
50848 +depends on GRKERNSEC
50849 +
50850 +config GRKERNSEC_EXECVE
50851 + bool "Enforce RLIMIT_NPROC on execs"
50852 + help
50853 + If you say Y here, users with a resource limit on processes will
50854 + have the value checked during execve() calls. The current system
50855 + only checks the system limit during fork() calls. If the sysctl option
50856 + is enabled, a sysctl option with name "execve_limiting" is created.
50857 +
50858 +config GRKERNSEC_DMESG
50859 + bool "Dmesg(8) restriction"
50860 + help
50861 + If you say Y here, non-root users will not be able to use dmesg(8)
50862 + to view up to the last 4kb of messages in the kernel's log buffer.
50863 + The kernel's log buffer often contains kernel addresses and other
50864 + identifying information useful to an attacker in fingerprinting a
50865 + system for a targeted exploit.
50866 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50867 + created.
50868 +
50869 +config GRKERNSEC_HARDEN_PTRACE
50870 + bool "Deter ptrace-based process snooping"
50871 + help
50872 + If you say Y here, TTY sniffers and other malicious monitoring
50873 + programs implemented through ptrace will be defeated. If you
50874 + have been using the RBAC system, this option has already been
50875 + enabled for several years for all users, with the ability to make
50876 + fine-grained exceptions.
50877 +
50878 + This option only affects the ability of non-root users to ptrace
50879 + processes that are not a descendent of the ptracing process.
50880 + This means that strace ./binary and gdb ./binary will still work,
50881 + but attaching to arbitrary processes will not. If the sysctl
50882 + option is enabled, a sysctl option with name "harden_ptrace" is
50883 + created.
50884 +
50885 +config GRKERNSEC_TPE
50886 + bool "Trusted Path Execution (TPE)"
50887 + help
50888 + If you say Y here, you will be able to choose a gid to add to the
50889 + supplementary groups of users you want to mark as "untrusted."
50890 + These users will not be able to execute any files that are not in
50891 + root-owned directories writable only by root. If the sysctl option
50892 + is enabled, a sysctl option with name "tpe" is created.
50893 +
50894 +config GRKERNSEC_TPE_ALL
50895 + bool "Partially restrict all non-root users"
50896 + depends on GRKERNSEC_TPE
50897 + help
50898 + If you say Y here, all non-root users will be covered under
50899 + a weaker TPE restriction. This is separate from, and in addition to,
50900 + the main TPE options that you have selected elsewhere. Thus, if a
50901 + "trusted" GID is chosen, this restriction applies to even that GID.
50902 + Under this restriction, all non-root users will only be allowed to
50903 + execute files in directories they own that are not group or
50904 + world-writable, or in directories owned by root and writable only by
50905 + root. If the sysctl option is enabled, a sysctl option with name
50906 + "tpe_restrict_all" is created.
50907 +
50908 +config GRKERNSEC_TPE_INVERT
50909 + bool "Invert GID option"
50910 + depends on GRKERNSEC_TPE
50911 + help
50912 + If you say Y here, the group you specify in the TPE configuration will
50913 + decide what group TPE restrictions will be *disabled* for. This
50914 + option is useful if you want TPE restrictions to be applied to most
50915 + users on the system. If the sysctl option is enabled, a sysctl option
50916 + with name "tpe_invert" is created. Unlike other sysctl options, this
50917 + entry will default to on for backward-compatibility.
50918 +
50919 +config GRKERNSEC_TPE_GID
50920 + int "GID for untrusted users"
50921 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50922 + default 1005
50923 + help
50924 + Setting this GID determines what group TPE restrictions will be
50925 + *enabled* for. If the sysctl option is enabled, a sysctl option
50926 + with name "tpe_gid" is created.
50927 +
50928 +config GRKERNSEC_TPE_GID
50929 + int "GID for trusted users"
50930 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50931 + default 1005
50932 + help
50933 + Setting this GID determines what group TPE restrictions will be
50934 + *disabled* for. If the sysctl option is enabled, a sysctl option
50935 + with name "tpe_gid" is created.
50936 +
50937 +endmenu
50938 +menu "Network Protections"
50939 +depends on GRKERNSEC
50940 +
50941 +config GRKERNSEC_RANDNET
50942 + bool "Larger entropy pools"
50943 + help
50944 + If you say Y here, the entropy pools used for many features of Linux
50945 + and grsecurity will be doubled in size. Since several grsecurity
50946 + features use additional randomness, it is recommended that you say Y
50947 + here. Saying Y here has a similar effect as modifying
50948 + /proc/sys/kernel/random/poolsize.
50949 +
50950 +config GRKERNSEC_BLACKHOLE
50951 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50952 + depends on NET
50953 + help
50954 + If you say Y here, neither TCP resets nor ICMP
50955 + destination-unreachable packets will be sent in response to packets
50956 + sent to ports for which no associated listening process exists.
50957 + This feature supports both IPV4 and IPV6 and exempts the
50958 + loopback interface from blackholing. Enabling this feature
50959 + makes a host more resilient to DoS attacks and reduces network
50960 + visibility against scanners.
50961 +
50962 + The blackhole feature as-implemented is equivalent to the FreeBSD
50963 + blackhole feature, as it prevents RST responses to all packets, not
50964 + just SYNs. Under most application behavior this causes no
50965 + problems, but applications (like haproxy) may not close certain
50966 + connections in a way that cleanly terminates them on the remote
50967 + end, leaving the remote host in LAST_ACK state. Because of this
50968 + side-effect and to prevent intentional LAST_ACK DoSes, this
50969 + feature also adds automatic mitigation against such attacks.
50970 + The mitigation drastically reduces the amount of time a socket
50971 + can spend in LAST_ACK state. If you're using haproxy and not
50972 + all servers it connects to have this option enabled, consider
50973 + disabling this feature on the haproxy host.
50974 +
50975 + If the sysctl option is enabled, two sysctl options with names
50976 + "ip_blackhole" and "lastack_retries" will be created.
50977 + While "ip_blackhole" takes the standard zero/non-zero on/off
50978 + toggle, "lastack_retries" uses the same kinds of values as
50979 + "tcp_retries1" and "tcp_retries2". The default value of 4
50980 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50981 + state.
50982 +
50983 +config GRKERNSEC_SOCKET
50984 + bool "Socket restrictions"
50985 + depends on NET
50986 + help
50987 + If you say Y here, you will be able to choose from several options.
50988 + If you assign a GID on your system and add it to the supplementary
50989 + groups of users you want to restrict socket access to, this patch
50990 + will perform up to three things, based on the option(s) you choose.
50991 +
50992 +config GRKERNSEC_SOCKET_ALL
50993 + bool "Deny any sockets to group"
50994 + depends on GRKERNSEC_SOCKET
50995 + help
50996 + If you say Y here, you will be able to choose a GID of whose users will
50997 + be unable to connect to other hosts from your machine or run server
50998 + applications from your machine. If the sysctl option is enabled, a
50999 + sysctl option with name "socket_all" is created.
51000 +
51001 +config GRKERNSEC_SOCKET_ALL_GID
51002 + int "GID to deny all sockets for"
51003 + depends on GRKERNSEC_SOCKET_ALL
51004 + default 1004
51005 + help
51006 + Here you can choose the GID to disable socket access for. Remember to
51007 + add the users you want socket access disabled for to the GID
51008 + specified here. If the sysctl option is enabled, a sysctl option
51009 + with name "socket_all_gid" is created.
51010 +
51011 +config GRKERNSEC_SOCKET_CLIENT
51012 + bool "Deny client sockets to group"
51013 + depends on GRKERNSEC_SOCKET
51014 + help
51015 + If you say Y here, you will be able to choose a GID of whose users will
51016 + be unable to connect to other hosts from your machine, but will be
51017 + able to run servers. If this option is enabled, all users in the group
51018 + you specify will have to use passive mode when initiating ftp transfers
51019 + from the shell on your machine. If the sysctl option is enabled, a
51020 + sysctl option with name "socket_client" is created.
51021 +
51022 +config GRKERNSEC_SOCKET_CLIENT_GID
51023 + int "GID to deny client sockets for"
51024 + depends on GRKERNSEC_SOCKET_CLIENT
51025 + default 1003
51026 + help
51027 + Here you can choose the GID to disable client socket access for.
51028 + Remember to add the users you want client socket access disabled for to
51029 + the GID specified here. If the sysctl option is enabled, a sysctl
51030 + option with name "socket_client_gid" is created.
51031 +
51032 +config GRKERNSEC_SOCKET_SERVER
51033 + bool "Deny server sockets to group"
51034 + depends on GRKERNSEC_SOCKET
51035 + help
51036 + If you say Y here, you will be able to choose a GID of whose users will
51037 + be unable to run server applications from your machine. If the sysctl
51038 + option is enabled, a sysctl option with name "socket_server" is created.
51039 +
51040 +config GRKERNSEC_SOCKET_SERVER_GID
51041 + int "GID to deny server sockets for"
51042 + depends on GRKERNSEC_SOCKET_SERVER
51043 + default 1002
51044 + help
51045 + Here you can choose the GID to disable server socket access for.
51046 + Remember to add the users you want server socket access disabled for to
51047 + the GID specified here. If the sysctl option is enabled, a sysctl
51048 + option with name "socket_server_gid" is created.
51049 +
51050 +endmenu
51051 +menu "Sysctl support"
51052 +depends on GRKERNSEC && SYSCTL
51053 +
51054 +config GRKERNSEC_SYSCTL
51055 + bool "Sysctl support"
51056 + help
51057 + If you say Y here, you will be able to change the options that
51058 + grsecurity runs with at bootup, without having to recompile your
51059 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51060 + to enable (1) or disable (0) various features. All the sysctl entries
51061 + are mutable until the "grsec_lock" entry is set to a non-zero value.
51062 + All features enabled in the kernel configuration are disabled at boot
51063 + if you do not say Y to the "Turn on features by default" option.
51064 + All options should be set at startup, and the grsec_lock entry should
51065 + be set to a non-zero value after all the options are set.
51066 + *THIS IS EXTREMELY IMPORTANT*
51067 +
51068 +config GRKERNSEC_SYSCTL_DISTRO
51069 + bool "Extra sysctl support for distro makers (READ HELP)"
51070 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51071 + help
51072 + If you say Y here, additional sysctl options will be created
51073 + for features that affect processes running as root. Therefore,
51074 + it is critical when using this option that the grsec_lock entry be
51075 + enabled after boot. Only distros with prebuilt kernel packages
51076 + with this option enabled that can ensure grsec_lock is enabled
51077 + after boot should use this option.
51078 + *Failure to set grsec_lock after boot makes all grsec features
51079 + this option covers useless*
51080 +
51081 + Currently this option creates the following sysctl entries:
51082 + "Disable Privileged I/O": "disable_priv_io"
51083 +
51084 +config GRKERNSEC_SYSCTL_ON
51085 + bool "Turn on features by default"
51086 + depends on GRKERNSEC_SYSCTL
51087 + help
51088 + If you say Y here, instead of having all features enabled in the
51089 + kernel configuration disabled at boot time, the features will be
51090 + enabled at boot time. It is recommended you say Y here unless
51091 + there is some reason you would want all sysctl-tunable features to
51092 + be disabled by default. As mentioned elsewhere, it is important
51093 + to enable the grsec_lock entry once you have finished modifying
51094 + the sysctl entries.
51095 +
51096 +endmenu
51097 +menu "Logging Options"
51098 +depends on GRKERNSEC
51099 +
51100 +config GRKERNSEC_FLOODTIME
51101 + int "Seconds in between log messages (minimum)"
51102 + default 10
51103 + help
51104 + This option allows you to enforce the number of seconds between
51105 + grsecurity log messages. The default should be suitable for most
51106 + people, however, if you choose to change it, choose a value small enough
51107 + to allow informative logs to be produced, but large enough to
51108 + prevent flooding.
51109 +
51110 +config GRKERNSEC_FLOODBURST
51111 + int "Number of messages in a burst (maximum)"
51112 + default 4
51113 + help
51114 + This option allows you to choose the maximum number of messages allowed
51115 + within the flood time interval you chose in a separate option. The
51116 + default should be suitable for most people, however if you find that
51117 + many of your logs are being interpreted as flooding, you may want to
51118 + raise this value.
51119 +
51120 +endmenu
51121 +
51122 +endmenu
51123 diff -urNp linux-2.6.39.4/grsecurity/Makefile linux-2.6.39.4/grsecurity/Makefile
51124 --- linux-2.6.39.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
51125 +++ linux-2.6.39.4/grsecurity/Makefile 2011-08-17 19:03:10.000000000 -0400
51126 @@ -0,0 +1,33 @@
51127 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51128 +# during 2001-2009 it has been completely redesigned by Brad Spengler
51129 +# into an RBAC system
51130 +#
51131 +# All code in this directory and various hooks inserted throughout the kernel
51132 +# are copyright Brad Spengler - Open Source Security, Inc., and released
51133 +# under the GPL v2 or higher
51134 +
51135 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51136 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
51137 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51138 +
51139 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51140 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51141 + gracl_learn.o grsec_log.o
51142 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51143 +
51144 +ifdef CONFIG_NET
51145 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o grsec_sock.o
51146 +endif
51147 +
51148 +ifndef CONFIG_GRKERNSEC
51149 +obj-y += grsec_disabled.o
51150 +endif
51151 +
51152 +ifdef CONFIG_GRKERNSEC_HIDESYM
51153 +extra-y := grsec_hidesym.o
51154 +$(obj)/grsec_hidesym.o:
51155 + @-chmod -f 500 /boot
51156 + @-chmod -f 500 /lib/modules
51157 + @-chmod -f 700 .
51158 + @echo ' grsec: protected kernel image paths'
51159 +endif
51160 diff -urNp linux-2.6.39.4/include/acpi/acpi_bus.h linux-2.6.39.4/include/acpi/acpi_bus.h
51161 --- linux-2.6.39.4/include/acpi/acpi_bus.h 2011-05-19 00:06:34.000000000 -0400
51162 +++ linux-2.6.39.4/include/acpi/acpi_bus.h 2011-08-05 20:34:06.000000000 -0400
51163 @@ -107,7 +107,7 @@ struct acpi_device_ops {
51164 acpi_op_bind bind;
51165 acpi_op_unbind unbind;
51166 acpi_op_notify notify;
51167 -};
51168 +} __no_const;
51169
51170 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
51171
51172 diff -urNp linux-2.6.39.4/include/asm-generic/atomic-long.h linux-2.6.39.4/include/asm-generic/atomic-long.h
51173 --- linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-05-19 00:06:34.000000000 -0400
51174 +++ linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-08-05 20:34:06.000000000 -0400
51175 @@ -22,6 +22,12 @@
51176
51177 typedef atomic64_t atomic_long_t;
51178
51179 +#ifdef CONFIG_PAX_REFCOUNT
51180 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
51181 +#else
51182 +typedef atomic64_t atomic_long_unchecked_t;
51183 +#endif
51184 +
51185 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
51186
51187 static inline long atomic_long_read(atomic_long_t *l)
51188 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
51189 return (long)atomic64_read(v);
51190 }
51191
51192 +#ifdef CONFIG_PAX_REFCOUNT
51193 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51194 +{
51195 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51196 +
51197 + return (long)atomic64_read_unchecked(v);
51198 +}
51199 +#endif
51200 +
51201 static inline void atomic_long_set(atomic_long_t *l, long i)
51202 {
51203 atomic64_t *v = (atomic64_t *)l;
51204 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
51205 atomic64_set(v, i);
51206 }
51207
51208 +#ifdef CONFIG_PAX_REFCOUNT
51209 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51210 +{
51211 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51212 +
51213 + atomic64_set_unchecked(v, i);
51214 +}
51215 +#endif
51216 +
51217 static inline void atomic_long_inc(atomic_long_t *l)
51218 {
51219 atomic64_t *v = (atomic64_t *)l;
51220 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51221 atomic64_inc(v);
51222 }
51223
51224 +#ifdef CONFIG_PAX_REFCOUNT
51225 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51226 +{
51227 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51228 +
51229 + atomic64_inc_unchecked(v);
51230 +}
51231 +#endif
51232 +
51233 static inline void atomic_long_dec(atomic_long_t *l)
51234 {
51235 atomic64_t *v = (atomic64_t *)l;
51236 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51237 atomic64_dec(v);
51238 }
51239
51240 +#ifdef CONFIG_PAX_REFCOUNT
51241 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51242 +{
51243 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51244 +
51245 + atomic64_dec_unchecked(v);
51246 +}
51247 +#endif
51248 +
51249 static inline void atomic_long_add(long i, atomic_long_t *l)
51250 {
51251 atomic64_t *v = (atomic64_t *)l;
51252 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51253 atomic64_add(i, v);
51254 }
51255
51256 +#ifdef CONFIG_PAX_REFCOUNT
51257 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51258 +{
51259 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51260 +
51261 + atomic64_add_unchecked(i, v);
51262 +}
51263 +#endif
51264 +
51265 static inline void atomic_long_sub(long i, atomic_long_t *l)
51266 {
51267 atomic64_t *v = (atomic64_t *)l;
51268 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51269 atomic64_sub(i, v);
51270 }
51271
51272 +#ifdef CONFIG_PAX_REFCOUNT
51273 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51274 +{
51275 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51276 +
51277 + atomic64_sub_unchecked(i, v);
51278 +}
51279 +#endif
51280 +
51281 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51282 {
51283 atomic64_t *v = (atomic64_t *)l;
51284 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51285 return (long)atomic64_inc_return(v);
51286 }
51287
51288 +#ifdef CONFIG_PAX_REFCOUNT
51289 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51290 +{
51291 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51292 +
51293 + return (long)atomic64_inc_return_unchecked(v);
51294 +}
51295 +#endif
51296 +
51297 static inline long atomic_long_dec_return(atomic_long_t *l)
51298 {
51299 atomic64_t *v = (atomic64_t *)l;
51300 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51301
51302 typedef atomic_t atomic_long_t;
51303
51304 +#ifdef CONFIG_PAX_REFCOUNT
51305 +typedef atomic_unchecked_t atomic_long_unchecked_t;
51306 +#else
51307 +typedef atomic_t atomic_long_unchecked_t;
51308 +#endif
51309 +
51310 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51311 static inline long atomic_long_read(atomic_long_t *l)
51312 {
51313 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51314 return (long)atomic_read(v);
51315 }
51316
51317 +#ifdef CONFIG_PAX_REFCOUNT
51318 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51319 +{
51320 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51321 +
51322 + return (long)atomic_read_unchecked(v);
51323 +}
51324 +#endif
51325 +
51326 static inline void atomic_long_set(atomic_long_t *l, long i)
51327 {
51328 atomic_t *v = (atomic_t *)l;
51329 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51330 atomic_set(v, i);
51331 }
51332
51333 +#ifdef CONFIG_PAX_REFCOUNT
51334 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51335 +{
51336 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51337 +
51338 + atomic_set_unchecked(v, i);
51339 +}
51340 +#endif
51341 +
51342 static inline void atomic_long_inc(atomic_long_t *l)
51343 {
51344 atomic_t *v = (atomic_t *)l;
51345 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51346 atomic_inc(v);
51347 }
51348
51349 +#ifdef CONFIG_PAX_REFCOUNT
51350 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51351 +{
51352 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51353 +
51354 + atomic_inc_unchecked(v);
51355 +}
51356 +#endif
51357 +
51358 static inline void atomic_long_dec(atomic_long_t *l)
51359 {
51360 atomic_t *v = (atomic_t *)l;
51361 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51362 atomic_dec(v);
51363 }
51364
51365 +#ifdef CONFIG_PAX_REFCOUNT
51366 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51367 +{
51368 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51369 +
51370 + atomic_dec_unchecked(v);
51371 +}
51372 +#endif
51373 +
51374 static inline void atomic_long_add(long i, atomic_long_t *l)
51375 {
51376 atomic_t *v = (atomic_t *)l;
51377 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51378 atomic_add(i, v);
51379 }
51380
51381 +#ifdef CONFIG_PAX_REFCOUNT
51382 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51383 +{
51384 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51385 +
51386 + atomic_add_unchecked(i, v);
51387 +}
51388 +#endif
51389 +
51390 static inline void atomic_long_sub(long i, atomic_long_t *l)
51391 {
51392 atomic_t *v = (atomic_t *)l;
51393 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51394 atomic_sub(i, v);
51395 }
51396
51397 +#ifdef CONFIG_PAX_REFCOUNT
51398 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51399 +{
51400 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51401 +
51402 + atomic_sub_unchecked(i, v);
51403 +}
51404 +#endif
51405 +
51406 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51407 {
51408 atomic_t *v = (atomic_t *)l;
51409 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51410 return (long)atomic_inc_return(v);
51411 }
51412
51413 +#ifdef CONFIG_PAX_REFCOUNT
51414 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51415 +{
51416 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51417 +
51418 + return (long)atomic_inc_return_unchecked(v);
51419 +}
51420 +#endif
51421 +
51422 static inline long atomic_long_dec_return(atomic_long_t *l)
51423 {
51424 atomic_t *v = (atomic_t *)l;
51425 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51426
51427 #endif /* BITS_PER_LONG == 64 */
51428
51429 +#ifdef CONFIG_PAX_REFCOUNT
51430 +static inline void pax_refcount_needs_these_functions(void)
51431 +{
51432 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
51433 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51434 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51435 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51436 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51437 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51438 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51439 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51440 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51441 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51442 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51443 +
51444 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51445 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51446 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51447 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51448 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51449 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51450 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51451 +}
51452 +#else
51453 +#define atomic_read_unchecked(v) atomic_read(v)
51454 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51455 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51456 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51457 +#define atomic_inc_unchecked(v) atomic_inc(v)
51458 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51459 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51460 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51461 +#define atomic_dec_unchecked(v) atomic_dec(v)
51462 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51463 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51464 +
51465 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
51466 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51467 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51468 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51469 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51470 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51471 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51472 +#endif
51473 +
51474 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51475 diff -urNp linux-2.6.39.4/include/asm-generic/cache.h linux-2.6.39.4/include/asm-generic/cache.h
51476 --- linux-2.6.39.4/include/asm-generic/cache.h 2011-05-19 00:06:34.000000000 -0400
51477 +++ linux-2.6.39.4/include/asm-generic/cache.h 2011-08-05 19:44:37.000000000 -0400
51478 @@ -6,7 +6,7 @@
51479 * cache lines need to provide their own cache.h.
51480 */
51481
51482 -#define L1_CACHE_SHIFT 5
51483 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51484 +#define L1_CACHE_SHIFT 5UL
51485 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51486
51487 #endif /* __ASM_GENERIC_CACHE_H */
51488 diff -urNp linux-2.6.39.4/include/asm-generic/int-l64.h linux-2.6.39.4/include/asm-generic/int-l64.h
51489 --- linux-2.6.39.4/include/asm-generic/int-l64.h 2011-05-19 00:06:34.000000000 -0400
51490 +++ linux-2.6.39.4/include/asm-generic/int-l64.h 2011-08-05 19:44:37.000000000 -0400
51491 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51492 typedef signed long s64;
51493 typedef unsigned long u64;
51494
51495 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51496 +
51497 #define S8_C(x) x
51498 #define U8_C(x) x ## U
51499 #define S16_C(x) x
51500 diff -urNp linux-2.6.39.4/include/asm-generic/int-ll64.h linux-2.6.39.4/include/asm-generic/int-ll64.h
51501 --- linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-05-19 00:06:34.000000000 -0400
51502 +++ linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-08-05 19:44:37.000000000 -0400
51503 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51504 typedef signed long long s64;
51505 typedef unsigned long long u64;
51506
51507 +typedef unsigned long long intoverflow_t;
51508 +
51509 #define S8_C(x) x
51510 #define U8_C(x) x ## U
51511 #define S16_C(x) x
51512 diff -urNp linux-2.6.39.4/include/asm-generic/kmap_types.h linux-2.6.39.4/include/asm-generic/kmap_types.h
51513 --- linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
51514 +++ linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-08-05 19:44:37.000000000 -0400
51515 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51516 KMAP_D(17) KM_NMI,
51517 KMAP_D(18) KM_NMI_PTE,
51518 KMAP_D(19) KM_KDB,
51519 +KMAP_D(20) KM_CLEARPAGE,
51520 /*
51521 * Remember to update debug_kmap_atomic() when adding new kmap types!
51522 */
51523 -KMAP_D(20) KM_TYPE_NR
51524 +KMAP_D(21) KM_TYPE_NR
51525 };
51526
51527 #undef KMAP_D
51528 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable.h linux-2.6.39.4/include/asm-generic/pgtable.h
51529 --- linux-2.6.39.4/include/asm-generic/pgtable.h 2011-05-19 00:06:34.000000000 -0400
51530 +++ linux-2.6.39.4/include/asm-generic/pgtable.h 2011-08-05 19:44:37.000000000 -0400
51531 @@ -447,6 +447,14 @@ static inline int pmd_write(pmd_t pmd)
51532 #endif /* __HAVE_ARCH_PMD_WRITE */
51533 #endif
51534
51535 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51536 +static inline unsigned long pax_open_kernel(void) { return 0; }
51537 +#endif
51538 +
51539 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51540 +static inline unsigned long pax_close_kernel(void) { return 0; }
51541 +#endif
51542 +
51543 #endif /* !__ASSEMBLY__ */
51544
51545 #endif /* _ASM_GENERIC_PGTABLE_H */
51546 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h
51547 --- linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-05-19 00:06:34.000000000 -0400
51548 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-08-05 19:44:37.000000000 -0400
51549 @@ -1,14 +1,19 @@
51550 #ifndef _PGTABLE_NOPMD_H
51551 #define _PGTABLE_NOPMD_H
51552
51553 -#ifndef __ASSEMBLY__
51554 -
51555 #include <asm-generic/pgtable-nopud.h>
51556
51557 -struct mm_struct;
51558 -
51559 #define __PAGETABLE_PMD_FOLDED
51560
51561 +#define PMD_SHIFT PUD_SHIFT
51562 +#define PTRS_PER_PMD 1
51563 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51564 +#define PMD_MASK (~(PMD_SIZE-1))
51565 +
51566 +#ifndef __ASSEMBLY__
51567 +
51568 +struct mm_struct;
51569 +
51570 /*
51571 * Having the pmd type consist of a pud gets the size right, and allows
51572 * us to conceptually access the pud entry that this pmd is folded into
51573 @@ -16,11 +21,6 @@ struct mm_struct;
51574 */
51575 typedef struct { pud_t pud; } pmd_t;
51576
51577 -#define PMD_SHIFT PUD_SHIFT
51578 -#define PTRS_PER_PMD 1
51579 -#define PMD_SIZE (1UL << PMD_SHIFT)
51580 -#define PMD_MASK (~(PMD_SIZE-1))
51581 -
51582 /*
51583 * The "pud_xxx()" functions here are trivial for a folded two-level
51584 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51585 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopud.h linux-2.6.39.4/include/asm-generic/pgtable-nopud.h
51586 --- linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-05-19 00:06:34.000000000 -0400
51587 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-08-05 19:44:37.000000000 -0400
51588 @@ -1,10 +1,15 @@
51589 #ifndef _PGTABLE_NOPUD_H
51590 #define _PGTABLE_NOPUD_H
51591
51592 -#ifndef __ASSEMBLY__
51593 -
51594 #define __PAGETABLE_PUD_FOLDED
51595
51596 +#define PUD_SHIFT PGDIR_SHIFT
51597 +#define PTRS_PER_PUD 1
51598 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51599 +#define PUD_MASK (~(PUD_SIZE-1))
51600 +
51601 +#ifndef __ASSEMBLY__
51602 +
51603 /*
51604 * Having the pud type consist of a pgd gets the size right, and allows
51605 * us to conceptually access the pgd entry that this pud is folded into
51606 @@ -12,11 +17,6 @@
51607 */
51608 typedef struct { pgd_t pgd; } pud_t;
51609
51610 -#define PUD_SHIFT PGDIR_SHIFT
51611 -#define PTRS_PER_PUD 1
51612 -#define PUD_SIZE (1UL << PUD_SHIFT)
51613 -#define PUD_MASK (~(PUD_SIZE-1))
51614 -
51615 /*
51616 * The "pgd_xxx()" functions here are trivial for a folded two-level
51617 * setup: the pud is never bad, and a pud always exists (as it's folded
51618 diff -urNp linux-2.6.39.4/include/asm-generic/vmlinux.lds.h linux-2.6.39.4/include/asm-generic/vmlinux.lds.h
51619 --- linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-05-19 00:06:34.000000000 -0400
51620 +++ linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-08-05 19:44:37.000000000 -0400
51621 @@ -213,6 +213,7 @@
51622 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51623 VMLINUX_SYMBOL(__start_rodata) = .; \
51624 *(.rodata) *(.rodata.*) \
51625 + *(.data..read_only) \
51626 *(__vermagic) /* Kernel version magic */ \
51627 . = ALIGN(8); \
51628 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51629 @@ -707,14 +708,15 @@
51630 * section in the linker script will go there too. @phdr should have
51631 * a leading colon.
51632 *
51633 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51634 + * Note that this macros defines per_cpu_load as an absolute symbol.
51635 * If there is no need to put the percpu section at a predetermined
51636 * address, use PERCPU().
51637 */
51638 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51639 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51640 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51641 + per_cpu_load = .; \
51642 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51643 - LOAD_OFFSET) { \
51644 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51645 VMLINUX_SYMBOL(__per_cpu_start) = .; \
51646 *(.data..percpu..first) \
51647 . = ALIGN(PAGE_SIZE); \
51648 @@ -726,7 +728,7 @@
51649 *(.data..percpu..shared_aligned) \
51650 VMLINUX_SYMBOL(__per_cpu_end) = .; \
51651 } phdr \
51652 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51653 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51654
51655 /**
51656 * PERCPU - define output section for percpu area, simple version
51657 diff -urNp linux-2.6.39.4/include/drm/drm_crtc_helper.h linux-2.6.39.4/include/drm/drm_crtc_helper.h
51658 --- linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-05-19 00:06:34.000000000 -0400
51659 +++ linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-08-05 20:34:06.000000000 -0400
51660 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51661
51662 /* disable crtc when not in use - more explicit than dpms off */
51663 void (*disable)(struct drm_crtc *crtc);
51664 -};
51665 +} __no_const;
51666
51667 struct drm_encoder_helper_funcs {
51668 void (*dpms)(struct drm_encoder *encoder, int mode);
51669 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51670 struct drm_connector *connector);
51671 /* disable encoder when not in use - more explicit than dpms off */
51672 void (*disable)(struct drm_encoder *encoder);
51673 -};
51674 +} __no_const;
51675
51676 struct drm_connector_helper_funcs {
51677 int (*get_modes)(struct drm_connector *connector);
51678 diff -urNp linux-2.6.39.4/include/drm/drmP.h linux-2.6.39.4/include/drm/drmP.h
51679 --- linux-2.6.39.4/include/drm/drmP.h 2011-05-19 00:06:34.000000000 -0400
51680 +++ linux-2.6.39.4/include/drm/drmP.h 2011-08-05 20:34:06.000000000 -0400
51681 @@ -73,6 +73,7 @@
51682 #include <linux/workqueue.h>
51683 #include <linux/poll.h>
51684 #include <asm/pgalloc.h>
51685 +#include <asm/local.h>
51686 #include "drm.h"
51687
51688 #include <linux/idr.h>
51689 @@ -1023,7 +1024,7 @@ struct drm_device {
51690
51691 /** \name Usage Counters */
51692 /*@{ */
51693 - int open_count; /**< Outstanding files open */
51694 + local_t open_count; /**< Outstanding files open */
51695 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51696 atomic_t vma_count; /**< Outstanding vma areas open */
51697 int buf_use; /**< Buffers in use -- cannot alloc */
51698 @@ -1034,7 +1035,7 @@ struct drm_device {
51699 /*@{ */
51700 unsigned long counters;
51701 enum drm_stat_type types[15];
51702 - atomic_t counts[15];
51703 + atomic_unchecked_t counts[15];
51704 /*@} */
51705
51706 struct list_head filelist;
51707 diff -urNp linux-2.6.39.4/include/drm/ttm/ttm_memory.h linux-2.6.39.4/include/drm/ttm/ttm_memory.h
51708 --- linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-05-19 00:06:34.000000000 -0400
51709 +++ linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-08-05 20:34:06.000000000 -0400
51710 @@ -47,7 +47,7 @@
51711
51712 struct ttm_mem_shrink {
51713 int (*do_shrink) (struct ttm_mem_shrink *);
51714 -};
51715 +} __no_const;
51716
51717 /**
51718 * struct ttm_mem_global - Global memory accounting structure.
51719 diff -urNp linux-2.6.39.4/include/linux/a.out.h linux-2.6.39.4/include/linux/a.out.h
51720 --- linux-2.6.39.4/include/linux/a.out.h 2011-05-19 00:06:34.000000000 -0400
51721 +++ linux-2.6.39.4/include/linux/a.out.h 2011-08-05 19:44:37.000000000 -0400
51722 @@ -39,6 +39,14 @@ enum machine_type {
51723 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51724 };
51725
51726 +/* Constants for the N_FLAGS field */
51727 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51728 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51729 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51730 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51731 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51732 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51733 +
51734 #if !defined (N_MAGIC)
51735 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51736 #endif
51737 diff -urNp linux-2.6.39.4/include/linux/atmdev.h linux-2.6.39.4/include/linux/atmdev.h
51738 --- linux-2.6.39.4/include/linux/atmdev.h 2011-05-19 00:06:34.000000000 -0400
51739 +++ linux-2.6.39.4/include/linux/atmdev.h 2011-08-05 19:44:37.000000000 -0400
51740 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51741 #endif
51742
51743 struct k_atm_aal_stats {
51744 -#define __HANDLE_ITEM(i) atomic_t i
51745 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51746 __AAL_STAT_ITEMS
51747 #undef __HANDLE_ITEM
51748 };
51749 diff -urNp linux-2.6.39.4/include/linux/binfmts.h linux-2.6.39.4/include/linux/binfmts.h
51750 --- linux-2.6.39.4/include/linux/binfmts.h 2011-05-19 00:06:34.000000000 -0400
51751 +++ linux-2.6.39.4/include/linux/binfmts.h 2011-08-05 19:44:37.000000000 -0400
51752 @@ -92,6 +92,7 @@ struct linux_binfmt {
51753 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51754 int (*load_shlib)(struct file *);
51755 int (*core_dump)(struct coredump_params *cprm);
51756 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51757 unsigned long min_coredump; /* minimal dump size */
51758 };
51759
51760 diff -urNp linux-2.6.39.4/include/linux/blkdev.h linux-2.6.39.4/include/linux/blkdev.h
51761 --- linux-2.6.39.4/include/linux/blkdev.h 2011-06-03 00:04:14.000000000 -0400
51762 +++ linux-2.6.39.4/include/linux/blkdev.h 2011-08-05 20:34:06.000000000 -0400
51763 @@ -1307,7 +1307,7 @@ struct block_device_operations {
51764 int (*getgeo)(struct block_device *, struct hd_geometry *);
51765 /* this callback is with swap_lock and sometimes page table lock held */
51766 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51767 - struct module *owner;
51768 + struct module * const owner;
51769 };
51770
51771 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51772 diff -urNp linux-2.6.39.4/include/linux/blktrace_api.h linux-2.6.39.4/include/linux/blktrace_api.h
51773 --- linux-2.6.39.4/include/linux/blktrace_api.h 2011-05-19 00:06:34.000000000 -0400
51774 +++ linux-2.6.39.4/include/linux/blktrace_api.h 2011-08-05 19:44:37.000000000 -0400
51775 @@ -161,7 +161,7 @@ struct blk_trace {
51776 struct dentry *dir;
51777 struct dentry *dropped_file;
51778 struct dentry *msg_file;
51779 - atomic_t dropped;
51780 + atomic_unchecked_t dropped;
51781 };
51782
51783 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51784 diff -urNp linux-2.6.39.4/include/linux/byteorder/little_endian.h linux-2.6.39.4/include/linux/byteorder/little_endian.h
51785 --- linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-05-19 00:06:34.000000000 -0400
51786 +++ linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-08-05 19:44:37.000000000 -0400
51787 @@ -42,51 +42,51 @@
51788
51789 static inline __le64 __cpu_to_le64p(const __u64 *p)
51790 {
51791 - return (__force __le64)*p;
51792 + return (__force const __le64)*p;
51793 }
51794 static inline __u64 __le64_to_cpup(const __le64 *p)
51795 {
51796 - return (__force __u64)*p;
51797 + return (__force const __u64)*p;
51798 }
51799 static inline __le32 __cpu_to_le32p(const __u32 *p)
51800 {
51801 - return (__force __le32)*p;
51802 + return (__force const __le32)*p;
51803 }
51804 static inline __u32 __le32_to_cpup(const __le32 *p)
51805 {
51806 - return (__force __u32)*p;
51807 + return (__force const __u32)*p;
51808 }
51809 static inline __le16 __cpu_to_le16p(const __u16 *p)
51810 {
51811 - return (__force __le16)*p;
51812 + return (__force const __le16)*p;
51813 }
51814 static inline __u16 __le16_to_cpup(const __le16 *p)
51815 {
51816 - return (__force __u16)*p;
51817 + return (__force const __u16)*p;
51818 }
51819 static inline __be64 __cpu_to_be64p(const __u64 *p)
51820 {
51821 - return (__force __be64)__swab64p(p);
51822 + return (__force const __be64)__swab64p(p);
51823 }
51824 static inline __u64 __be64_to_cpup(const __be64 *p)
51825 {
51826 - return __swab64p((__u64 *)p);
51827 + return __swab64p((const __u64 *)p);
51828 }
51829 static inline __be32 __cpu_to_be32p(const __u32 *p)
51830 {
51831 - return (__force __be32)__swab32p(p);
51832 + return (__force const __be32)__swab32p(p);
51833 }
51834 static inline __u32 __be32_to_cpup(const __be32 *p)
51835 {
51836 - return __swab32p((__u32 *)p);
51837 + return __swab32p((const __u32 *)p);
51838 }
51839 static inline __be16 __cpu_to_be16p(const __u16 *p)
51840 {
51841 - return (__force __be16)__swab16p(p);
51842 + return (__force const __be16)__swab16p(p);
51843 }
51844 static inline __u16 __be16_to_cpup(const __be16 *p)
51845 {
51846 - return __swab16p((__u16 *)p);
51847 + return __swab16p((const __u16 *)p);
51848 }
51849 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51850 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51851 diff -urNp linux-2.6.39.4/include/linux/cache.h linux-2.6.39.4/include/linux/cache.h
51852 --- linux-2.6.39.4/include/linux/cache.h 2011-05-19 00:06:34.000000000 -0400
51853 +++ linux-2.6.39.4/include/linux/cache.h 2011-08-05 19:44:37.000000000 -0400
51854 @@ -16,6 +16,10 @@
51855 #define __read_mostly
51856 #endif
51857
51858 +#ifndef __read_only
51859 +#define __read_only __read_mostly
51860 +#endif
51861 +
51862 #ifndef ____cacheline_aligned
51863 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51864 #endif
51865 diff -urNp linux-2.6.39.4/include/linux/capability.h linux-2.6.39.4/include/linux/capability.h
51866 --- linux-2.6.39.4/include/linux/capability.h 2011-05-19 00:06:34.000000000 -0400
51867 +++ linux-2.6.39.4/include/linux/capability.h 2011-08-05 19:44:37.000000000 -0400
51868 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51869 extern bool ns_capable(struct user_namespace *ns, int cap);
51870 extern bool task_ns_capable(struct task_struct *t, int cap);
51871 extern bool nsown_capable(int cap);
51872 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51873 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51874 +extern bool capable_nolog(int cap);
51875
51876 /* audit system wants to get cap info from files as well */
51877 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51878 diff -urNp linux-2.6.39.4/include/linux/compiler-gcc4.h linux-2.6.39.4/include/linux/compiler-gcc4.h
51879 --- linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-05-19 00:06:34.000000000 -0400
51880 +++ linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-08-05 20:34:06.000000000 -0400
51881 @@ -31,6 +31,9 @@
51882
51883
51884 #if __GNUC_MINOR__ >= 5
51885 +
51886 +#define __no_const __attribute__((no_const))
51887 +
51888 /*
51889 * Mark a position in code as unreachable. This can be used to
51890 * suppress control flow warnings after asm blocks that transfer
51891 @@ -46,6 +49,11 @@
51892 #define __noclone __attribute__((__noclone__))
51893
51894 #endif
51895 +
51896 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51897 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51898 +#define __bos0(ptr) __bos((ptr), 0)
51899 +#define __bos1(ptr) __bos((ptr), 1)
51900 #endif
51901
51902 #if __GNUC_MINOR__ > 0
51903 diff -urNp linux-2.6.39.4/include/linux/compiler.h linux-2.6.39.4/include/linux/compiler.h
51904 --- linux-2.6.39.4/include/linux/compiler.h 2011-05-19 00:06:34.000000000 -0400
51905 +++ linux-2.6.39.4/include/linux/compiler.h 2011-08-05 20:34:06.000000000 -0400
51906 @@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51907 # define __attribute_const__ /* unimplemented */
51908 #endif
51909
51910 +#ifndef __no_const
51911 +# define __no_const
51912 +#endif
51913 +
51914 /*
51915 * Tell gcc if a function is cold. The compiler will assume any path
51916 * directly leading to the call is unlikely.
51917 @@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51918 #define __cold
51919 #endif
51920
51921 +#ifndef __alloc_size
51922 +#define __alloc_size(...)
51923 +#endif
51924 +
51925 +#ifndef __bos
51926 +#define __bos(ptr, arg)
51927 +#endif
51928 +
51929 +#ifndef __bos0
51930 +#define __bos0(ptr)
51931 +#endif
51932 +
51933 +#ifndef __bos1
51934 +#define __bos1(ptr)
51935 +#endif
51936 +
51937 /* Simple shorthand for a section definition */
51938 #ifndef __section
51939 # define __section(S) __attribute__ ((__section__(#S)))
51940 @@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51941 * use is to mediate communication between process-level code and irq/NMI
51942 * handlers, all running on the same CPU.
51943 */
51944 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51945 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51946 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51947
51948 #endif /* __LINUX_COMPILER_H */
51949 diff -urNp linux-2.6.39.4/include/linux/cpuset.h linux-2.6.39.4/include/linux/cpuset.h
51950 --- linux-2.6.39.4/include/linux/cpuset.h 2011-05-19 00:06:34.000000000 -0400
51951 +++ linux-2.6.39.4/include/linux/cpuset.h 2011-08-05 19:44:37.000000000 -0400
51952 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51953 * nodemask.
51954 */
51955 smp_mb();
51956 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51957 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51958 }
51959
51960 static inline void set_mems_allowed(nodemask_t nodemask)
51961 diff -urNp linux-2.6.39.4/include/linux/crypto.h linux-2.6.39.4/include/linux/crypto.h
51962 --- linux-2.6.39.4/include/linux/crypto.h 2011-05-19 00:06:34.000000000 -0400
51963 +++ linux-2.6.39.4/include/linux/crypto.h 2011-08-05 20:34:06.000000000 -0400
51964 @@ -361,7 +361,7 @@ struct cipher_tfm {
51965 const u8 *key, unsigned int keylen);
51966 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51967 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51968 -};
51969 +} __no_const;
51970
51971 struct hash_tfm {
51972 int (*init)(struct hash_desc *desc);
51973 @@ -382,13 +382,13 @@ struct compress_tfm {
51974 int (*cot_decompress)(struct crypto_tfm *tfm,
51975 const u8 *src, unsigned int slen,
51976 u8 *dst, unsigned int *dlen);
51977 -};
51978 +} __no_const;
51979
51980 struct rng_tfm {
51981 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51982 unsigned int dlen);
51983 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51984 -};
51985 +} __no_const;
51986
51987 #define crt_ablkcipher crt_u.ablkcipher
51988 #define crt_aead crt_u.aead
51989 diff -urNp linux-2.6.39.4/include/linux/decompress/mm.h linux-2.6.39.4/include/linux/decompress/mm.h
51990 --- linux-2.6.39.4/include/linux/decompress/mm.h 2011-05-19 00:06:34.000000000 -0400
51991 +++ linux-2.6.39.4/include/linux/decompress/mm.h 2011-08-05 19:44:37.000000000 -0400
51992 @@ -77,7 +77,7 @@ static void free(void *where)
51993 * warnings when not needed (indeed large_malloc / large_free are not
51994 * needed by inflate */
51995
51996 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51997 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51998 #define free(a) kfree(a)
51999
52000 #define large_malloc(a) vmalloc(a)
52001 diff -urNp linux-2.6.39.4/include/linux/dma-mapping.h linux-2.6.39.4/include/linux/dma-mapping.h
52002 --- linux-2.6.39.4/include/linux/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
52003 +++ linux-2.6.39.4/include/linux/dma-mapping.h 2011-08-05 20:34:06.000000000 -0400
52004 @@ -49,7 +49,7 @@ struct dma_map_ops {
52005 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
52006 int (*dma_supported)(struct device *dev, u64 mask);
52007 int (*set_dma_mask)(struct device *dev, u64 mask);
52008 - int is_phys;
52009 + const int is_phys;
52010 };
52011
52012 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
52013 diff -urNp linux-2.6.39.4/include/linux/efi.h linux-2.6.39.4/include/linux/efi.h
52014 --- linux-2.6.39.4/include/linux/efi.h 2011-06-25 12:55:23.000000000 -0400
52015 +++ linux-2.6.39.4/include/linux/efi.h 2011-08-05 20:34:06.000000000 -0400
52016 @@ -409,7 +409,7 @@ struct efivar_operations {
52017 efi_get_variable_t *get_variable;
52018 efi_get_next_variable_t *get_next_variable;
52019 efi_set_variable_t *set_variable;
52020 -};
52021 +} __no_const;
52022
52023 struct efivars {
52024 /*
52025 diff -urNp linux-2.6.39.4/include/linux/elf.h linux-2.6.39.4/include/linux/elf.h
52026 --- linux-2.6.39.4/include/linux/elf.h 2011-05-19 00:06:34.000000000 -0400
52027 +++ linux-2.6.39.4/include/linux/elf.h 2011-08-05 19:44:37.000000000 -0400
52028 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
52029 #define PT_GNU_EH_FRAME 0x6474e550
52030
52031 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
52032 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
52033 +
52034 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
52035 +
52036 +/* Constants for the e_flags field */
52037 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
52038 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
52039 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
52040 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
52041 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
52042 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
52043
52044 /*
52045 * Extended Numbering
52046 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
52047 #define DT_DEBUG 21
52048 #define DT_TEXTREL 22
52049 #define DT_JMPREL 23
52050 +#define DT_FLAGS 30
52051 + #define DF_TEXTREL 0x00000004
52052 #define DT_ENCODING 32
52053 #define OLD_DT_LOOS 0x60000000
52054 #define DT_LOOS 0x6000000d
52055 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
52056 #define PF_W 0x2
52057 #define PF_X 0x1
52058
52059 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
52060 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
52061 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
52062 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
52063 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
52064 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
52065 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
52066 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
52067 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
52068 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
52069 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
52070 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
52071 +
52072 typedef struct elf32_phdr{
52073 Elf32_Word p_type;
52074 Elf32_Off p_offset;
52075 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
52076 #define EI_OSABI 7
52077 #define EI_PAD 8
52078
52079 +#define EI_PAX 14
52080 +
52081 #define ELFMAG0 0x7f /* EI_MAG */
52082 #define ELFMAG1 'E'
52083 #define ELFMAG2 'L'
52084 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
52085 #define elf_note elf32_note
52086 #define elf_addr_t Elf32_Off
52087 #define Elf_Half Elf32_Half
52088 +#define elf_dyn Elf32_Dyn
52089
52090 #else
52091
52092 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
52093 #define elf_note elf64_note
52094 #define elf_addr_t Elf64_Off
52095 #define Elf_Half Elf64_Half
52096 +#define elf_dyn Elf64_Dyn
52097
52098 #endif
52099
52100 diff -urNp linux-2.6.39.4/include/linux/firewire.h linux-2.6.39.4/include/linux/firewire.h
52101 --- linux-2.6.39.4/include/linux/firewire.h 2011-05-19 00:06:34.000000000 -0400
52102 +++ linux-2.6.39.4/include/linux/firewire.h 2011-08-05 20:34:06.000000000 -0400
52103 @@ -429,7 +429,7 @@ struct fw_iso_context {
52104 union {
52105 fw_iso_callback_t sc;
52106 fw_iso_mc_callback_t mc;
52107 - } callback;
52108 + } __no_const callback;
52109 void *callback_data;
52110 };
52111
52112 diff -urNp linux-2.6.39.4/include/linux/fscache-cache.h linux-2.6.39.4/include/linux/fscache-cache.h
52113 --- linux-2.6.39.4/include/linux/fscache-cache.h 2011-05-19 00:06:34.000000000 -0400
52114 +++ linux-2.6.39.4/include/linux/fscache-cache.h 2011-08-05 19:44:37.000000000 -0400
52115 @@ -113,7 +113,7 @@ struct fscache_operation {
52116 #endif
52117 };
52118
52119 -extern atomic_t fscache_op_debug_id;
52120 +extern atomic_unchecked_t fscache_op_debug_id;
52121 extern void fscache_op_work_func(struct work_struct *work);
52122
52123 extern void fscache_enqueue_operation(struct fscache_operation *);
52124 @@ -133,7 +133,7 @@ static inline void fscache_operation_ini
52125 {
52126 INIT_WORK(&op->work, fscache_op_work_func);
52127 atomic_set(&op->usage, 1);
52128 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
52129 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52130 op->processor = processor;
52131 op->release = release;
52132 INIT_LIST_HEAD(&op->pend_link);
52133 diff -urNp linux-2.6.39.4/include/linux/fs.h linux-2.6.39.4/include/linux/fs.h
52134 --- linux-2.6.39.4/include/linux/fs.h 2011-05-19 00:06:34.000000000 -0400
52135 +++ linux-2.6.39.4/include/linux/fs.h 2011-08-05 20:34:06.000000000 -0400
52136 @@ -108,6 +108,11 @@ struct inodes_stat_t {
52137 /* File was opened by fanotify and shouldn't generate fanotify events */
52138 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
52139
52140 +/* Hack for grsec so as not to require read permission simply to execute
52141 + * a binary
52142 + */
52143 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
52144 +
52145 /*
52146 * The below are the various read and write types that we support. Some of
52147 * them include behavioral modifiers that send information down to the
52148 @@ -1535,7 +1540,7 @@ struct block_device_operations;
52149 * the big kernel lock held in all filesystems.
52150 */
52151 struct file_operations {
52152 - struct module *owner;
52153 + struct module * const owner;
52154 loff_t (*llseek) (struct file *, loff_t, int);
52155 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
52156 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
52157 @@ -1563,6 +1568,7 @@ struct file_operations {
52158 long (*fallocate)(struct file *file, int mode, loff_t offset,
52159 loff_t len);
52160 };
52161 +typedef struct file_operations __no_const file_operations_no_const;
52162
52163 #define IPERM_FLAG_RCU 0x0001
52164
52165 diff -urNp linux-2.6.39.4/include/linux/fs_struct.h linux-2.6.39.4/include/linux/fs_struct.h
52166 --- linux-2.6.39.4/include/linux/fs_struct.h 2011-05-19 00:06:34.000000000 -0400
52167 +++ linux-2.6.39.4/include/linux/fs_struct.h 2011-08-05 19:44:37.000000000 -0400
52168 @@ -6,7 +6,7 @@
52169 #include <linux/seqlock.h>
52170
52171 struct fs_struct {
52172 - int users;
52173 + atomic_t users;
52174 spinlock_t lock;
52175 seqcount_t seq;
52176 int umask;
52177 diff -urNp linux-2.6.39.4/include/linux/ftrace_event.h linux-2.6.39.4/include/linux/ftrace_event.h
52178 --- linux-2.6.39.4/include/linux/ftrace_event.h 2011-05-19 00:06:34.000000000 -0400
52179 +++ linux-2.6.39.4/include/linux/ftrace_event.h 2011-08-05 20:34:06.000000000 -0400
52180 @@ -84,7 +84,7 @@ struct trace_event_functions {
52181 trace_print_func raw;
52182 trace_print_func hex;
52183 trace_print_func binary;
52184 -};
52185 +} __no_const;
52186
52187 struct trace_event {
52188 struct hlist_node node;
52189 @@ -235,7 +235,7 @@ extern int trace_define_field(struct ftr
52190 extern int trace_add_event_call(struct ftrace_event_call *call);
52191 extern void trace_remove_event_call(struct ftrace_event_call *call);
52192
52193 -#define is_signed_type(type) (((type)(-1)) < 0)
52194 +#define is_signed_type(type) (((type)(-1)) < (type)1)
52195
52196 int trace_set_clr_event(const char *system, const char *event, int set);
52197
52198 diff -urNp linux-2.6.39.4/include/linux/genhd.h linux-2.6.39.4/include/linux/genhd.h
52199 --- linux-2.6.39.4/include/linux/genhd.h 2011-06-03 00:04:14.000000000 -0400
52200 +++ linux-2.6.39.4/include/linux/genhd.h 2011-08-05 19:44:37.000000000 -0400
52201 @@ -184,7 +184,7 @@ struct gendisk {
52202 struct kobject *slave_dir;
52203
52204 struct timer_rand_state *random;
52205 - atomic_t sync_io; /* RAID */
52206 + atomic_unchecked_t sync_io; /* RAID */
52207 struct disk_events *ev;
52208 #ifdef CONFIG_BLK_DEV_INTEGRITY
52209 struct blk_integrity *integrity;
52210 diff -urNp linux-2.6.39.4/include/linux/gracl.h linux-2.6.39.4/include/linux/gracl.h
52211 --- linux-2.6.39.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52212 +++ linux-2.6.39.4/include/linux/gracl.h 2011-08-05 19:44:37.000000000 -0400
52213 @@ -0,0 +1,317 @@
52214 +#ifndef GR_ACL_H
52215 +#define GR_ACL_H
52216 +
52217 +#include <linux/grdefs.h>
52218 +#include <linux/resource.h>
52219 +#include <linux/capability.h>
52220 +#include <linux/dcache.h>
52221 +#include <asm/resource.h>
52222 +
52223 +/* Major status information */
52224 +
52225 +#define GR_VERSION "grsecurity 2.2.2"
52226 +#define GRSECURITY_VERSION 0x2202
52227 +
52228 +enum {
52229 + GR_SHUTDOWN = 0,
52230 + GR_ENABLE = 1,
52231 + GR_SPROLE = 2,
52232 + GR_RELOAD = 3,
52233 + GR_SEGVMOD = 4,
52234 + GR_STATUS = 5,
52235 + GR_UNSPROLE = 6,
52236 + GR_PASSSET = 7,
52237 + GR_SPROLEPAM = 8,
52238 +};
52239 +
52240 +/* Password setup definitions
52241 + * kernel/grhash.c */
52242 +enum {
52243 + GR_PW_LEN = 128,
52244 + GR_SALT_LEN = 16,
52245 + GR_SHA_LEN = 32,
52246 +};
52247 +
52248 +enum {
52249 + GR_SPROLE_LEN = 64,
52250 +};
52251 +
52252 +enum {
52253 + GR_NO_GLOB = 0,
52254 + GR_REG_GLOB,
52255 + GR_CREATE_GLOB
52256 +};
52257 +
52258 +#define GR_NLIMITS 32
52259 +
52260 +/* Begin Data Structures */
52261 +
52262 +struct sprole_pw {
52263 + unsigned char *rolename;
52264 + unsigned char salt[GR_SALT_LEN];
52265 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52266 +};
52267 +
52268 +struct name_entry {
52269 + __u32 key;
52270 + ino_t inode;
52271 + dev_t device;
52272 + char *name;
52273 + __u16 len;
52274 + __u8 deleted;
52275 + struct name_entry *prev;
52276 + struct name_entry *next;
52277 +};
52278 +
52279 +struct inodev_entry {
52280 + struct name_entry *nentry;
52281 + struct inodev_entry *prev;
52282 + struct inodev_entry *next;
52283 +};
52284 +
52285 +struct acl_role_db {
52286 + struct acl_role_label **r_hash;
52287 + __u32 r_size;
52288 +};
52289 +
52290 +struct inodev_db {
52291 + struct inodev_entry **i_hash;
52292 + __u32 i_size;
52293 +};
52294 +
52295 +struct name_db {
52296 + struct name_entry **n_hash;
52297 + __u32 n_size;
52298 +};
52299 +
52300 +struct crash_uid {
52301 + uid_t uid;
52302 + unsigned long expires;
52303 +};
52304 +
52305 +struct gr_hash_struct {
52306 + void **table;
52307 + void **nametable;
52308 + void *first;
52309 + __u32 table_size;
52310 + __u32 used_size;
52311 + int type;
52312 +};
52313 +
52314 +/* Userspace Grsecurity ACL data structures */
52315 +
52316 +struct acl_subject_label {
52317 + char *filename;
52318 + ino_t inode;
52319 + dev_t device;
52320 + __u32 mode;
52321 + kernel_cap_t cap_mask;
52322 + kernel_cap_t cap_lower;
52323 + kernel_cap_t cap_invert_audit;
52324 +
52325 + struct rlimit res[GR_NLIMITS];
52326 + __u32 resmask;
52327 +
52328 + __u8 user_trans_type;
52329 + __u8 group_trans_type;
52330 + uid_t *user_transitions;
52331 + gid_t *group_transitions;
52332 + __u16 user_trans_num;
52333 + __u16 group_trans_num;
52334 +
52335 + __u32 sock_families[2];
52336 + __u32 ip_proto[8];
52337 + __u32 ip_type;
52338 + struct acl_ip_label **ips;
52339 + __u32 ip_num;
52340 + __u32 inaddr_any_override;
52341 +
52342 + __u32 crashes;
52343 + unsigned long expires;
52344 +
52345 + struct acl_subject_label *parent_subject;
52346 + struct gr_hash_struct *hash;
52347 + struct acl_subject_label *prev;
52348 + struct acl_subject_label *next;
52349 +
52350 + struct acl_object_label **obj_hash;
52351 + __u32 obj_hash_size;
52352 + __u16 pax_flags;
52353 +};
52354 +
52355 +struct role_allowed_ip {
52356 + __u32 addr;
52357 + __u32 netmask;
52358 +
52359 + struct role_allowed_ip *prev;
52360 + struct role_allowed_ip *next;
52361 +};
52362 +
52363 +struct role_transition {
52364 + char *rolename;
52365 +
52366 + struct role_transition *prev;
52367 + struct role_transition *next;
52368 +};
52369 +
52370 +struct acl_role_label {
52371 + char *rolename;
52372 + uid_t uidgid;
52373 + __u16 roletype;
52374 +
52375 + __u16 auth_attempts;
52376 + unsigned long expires;
52377 +
52378 + struct acl_subject_label *root_label;
52379 + struct gr_hash_struct *hash;
52380 +
52381 + struct acl_role_label *prev;
52382 + struct acl_role_label *next;
52383 +
52384 + struct role_transition *transitions;
52385 + struct role_allowed_ip *allowed_ips;
52386 + uid_t *domain_children;
52387 + __u16 domain_child_num;
52388 +
52389 + struct acl_subject_label **subj_hash;
52390 + __u32 subj_hash_size;
52391 +};
52392 +
52393 +struct user_acl_role_db {
52394 + struct acl_role_label **r_table;
52395 + __u32 num_pointers; /* Number of allocations to track */
52396 + __u32 num_roles; /* Number of roles */
52397 + __u32 num_domain_children; /* Number of domain children */
52398 + __u32 num_subjects; /* Number of subjects */
52399 + __u32 num_objects; /* Number of objects */
52400 +};
52401 +
52402 +struct acl_object_label {
52403 + char *filename;
52404 + ino_t inode;
52405 + dev_t device;
52406 + __u32 mode;
52407 +
52408 + struct acl_subject_label *nested;
52409 + struct acl_object_label *globbed;
52410 +
52411 + /* next two structures not used */
52412 +
52413 + struct acl_object_label *prev;
52414 + struct acl_object_label *next;
52415 +};
52416 +
52417 +struct acl_ip_label {
52418 + char *iface;
52419 + __u32 addr;
52420 + __u32 netmask;
52421 + __u16 low, high;
52422 + __u8 mode;
52423 + __u32 type;
52424 + __u32 proto[8];
52425 +
52426 + /* next two structures not used */
52427 +
52428 + struct acl_ip_label *prev;
52429 + struct acl_ip_label *next;
52430 +};
52431 +
52432 +struct gr_arg {
52433 + struct user_acl_role_db role_db;
52434 + unsigned char pw[GR_PW_LEN];
52435 + unsigned char salt[GR_SALT_LEN];
52436 + unsigned char sum[GR_SHA_LEN];
52437 + unsigned char sp_role[GR_SPROLE_LEN];
52438 + struct sprole_pw *sprole_pws;
52439 + dev_t segv_device;
52440 + ino_t segv_inode;
52441 + uid_t segv_uid;
52442 + __u16 num_sprole_pws;
52443 + __u16 mode;
52444 +};
52445 +
52446 +struct gr_arg_wrapper {
52447 + struct gr_arg *arg;
52448 + __u32 version;
52449 + __u32 size;
52450 +};
52451 +
52452 +struct subject_map {
52453 + struct acl_subject_label *user;
52454 + struct acl_subject_label *kernel;
52455 + struct subject_map *prev;
52456 + struct subject_map *next;
52457 +};
52458 +
52459 +struct acl_subj_map_db {
52460 + struct subject_map **s_hash;
52461 + __u32 s_size;
52462 +};
52463 +
52464 +/* End Data Structures Section */
52465 +
52466 +/* Hash functions generated by empirical testing by Brad Spengler
52467 + Makes good use of the low bits of the inode. Generally 0-1 times
52468 + in loop for successful match. 0-3 for unsuccessful match.
52469 + Shift/add algorithm with modulus of table size and an XOR*/
52470 +
52471 +static __inline__ unsigned int
52472 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52473 +{
52474 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
52475 +}
52476 +
52477 + static __inline__ unsigned int
52478 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52479 +{
52480 + return ((const unsigned long)userp % sz);
52481 +}
52482 +
52483 +static __inline__ unsigned int
52484 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52485 +{
52486 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52487 +}
52488 +
52489 +static __inline__ unsigned int
52490 +nhash(const char *name, const __u16 len, const unsigned int sz)
52491 +{
52492 + return full_name_hash((const unsigned char *)name, len) % sz;
52493 +}
52494 +
52495 +#define FOR_EACH_ROLE_START(role) \
52496 + role = role_list; \
52497 + while (role) {
52498 +
52499 +#define FOR_EACH_ROLE_END(role) \
52500 + role = role->prev; \
52501 + }
52502 +
52503 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52504 + subj = NULL; \
52505 + iter = 0; \
52506 + while (iter < role->subj_hash_size) { \
52507 + if (subj == NULL) \
52508 + subj = role->subj_hash[iter]; \
52509 + if (subj == NULL) { \
52510 + iter++; \
52511 + continue; \
52512 + }
52513 +
52514 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52515 + subj = subj->next; \
52516 + if (subj == NULL) \
52517 + iter++; \
52518 + }
52519 +
52520 +
52521 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52522 + subj = role->hash->first; \
52523 + while (subj != NULL) {
52524 +
52525 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52526 + subj = subj->next; \
52527 + }
52528 +
52529 +#endif
52530 +
52531 diff -urNp linux-2.6.39.4/include/linux/gralloc.h linux-2.6.39.4/include/linux/gralloc.h
52532 --- linux-2.6.39.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52533 +++ linux-2.6.39.4/include/linux/gralloc.h 2011-08-05 19:44:37.000000000 -0400
52534 @@ -0,0 +1,9 @@
52535 +#ifndef __GRALLOC_H
52536 +#define __GRALLOC_H
52537 +
52538 +void acl_free_all(void);
52539 +int acl_alloc_stack_init(unsigned long size);
52540 +void *acl_alloc(unsigned long len);
52541 +void *acl_alloc_num(unsigned long num, unsigned long len);
52542 +
52543 +#endif
52544 diff -urNp linux-2.6.39.4/include/linux/grdefs.h linux-2.6.39.4/include/linux/grdefs.h
52545 --- linux-2.6.39.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52546 +++ linux-2.6.39.4/include/linux/grdefs.h 2011-08-05 19:44:37.000000000 -0400
52547 @@ -0,0 +1,140 @@
52548 +#ifndef GRDEFS_H
52549 +#define GRDEFS_H
52550 +
52551 +/* Begin grsecurity status declarations */
52552 +
52553 +enum {
52554 + GR_READY = 0x01,
52555 + GR_STATUS_INIT = 0x00 // disabled state
52556 +};
52557 +
52558 +/* Begin ACL declarations */
52559 +
52560 +/* Role flags */
52561 +
52562 +enum {
52563 + GR_ROLE_USER = 0x0001,
52564 + GR_ROLE_GROUP = 0x0002,
52565 + GR_ROLE_DEFAULT = 0x0004,
52566 + GR_ROLE_SPECIAL = 0x0008,
52567 + GR_ROLE_AUTH = 0x0010,
52568 + GR_ROLE_NOPW = 0x0020,
52569 + GR_ROLE_GOD = 0x0040,
52570 + GR_ROLE_LEARN = 0x0080,
52571 + GR_ROLE_TPE = 0x0100,
52572 + GR_ROLE_DOMAIN = 0x0200,
52573 + GR_ROLE_PAM = 0x0400,
52574 + GR_ROLE_PERSIST = 0x0800
52575 +};
52576 +
52577 +/* ACL Subject and Object mode flags */
52578 +enum {
52579 + GR_DELETED = 0x80000000
52580 +};
52581 +
52582 +/* ACL Object-only mode flags */
52583 +enum {
52584 + GR_READ = 0x00000001,
52585 + GR_APPEND = 0x00000002,
52586 + GR_WRITE = 0x00000004,
52587 + GR_EXEC = 0x00000008,
52588 + GR_FIND = 0x00000010,
52589 + GR_INHERIT = 0x00000020,
52590 + GR_SETID = 0x00000040,
52591 + GR_CREATE = 0x00000080,
52592 + GR_DELETE = 0x00000100,
52593 + GR_LINK = 0x00000200,
52594 + GR_AUDIT_READ = 0x00000400,
52595 + GR_AUDIT_APPEND = 0x00000800,
52596 + GR_AUDIT_WRITE = 0x00001000,
52597 + GR_AUDIT_EXEC = 0x00002000,
52598 + GR_AUDIT_FIND = 0x00004000,
52599 + GR_AUDIT_INHERIT= 0x00008000,
52600 + GR_AUDIT_SETID = 0x00010000,
52601 + GR_AUDIT_CREATE = 0x00020000,
52602 + GR_AUDIT_DELETE = 0x00040000,
52603 + GR_AUDIT_LINK = 0x00080000,
52604 + GR_PTRACERD = 0x00100000,
52605 + GR_NOPTRACE = 0x00200000,
52606 + GR_SUPPRESS = 0x00400000,
52607 + GR_NOLEARN = 0x00800000,
52608 + GR_INIT_TRANSFER= 0x01000000
52609 +};
52610 +
52611 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52612 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52613 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52614 +
52615 +/* ACL subject-only mode flags */
52616 +enum {
52617 + GR_KILL = 0x00000001,
52618 + GR_VIEW = 0x00000002,
52619 + GR_PROTECTED = 0x00000004,
52620 + GR_LEARN = 0x00000008,
52621 + GR_OVERRIDE = 0x00000010,
52622 + /* just a placeholder, this mode is only used in userspace */
52623 + GR_DUMMY = 0x00000020,
52624 + GR_PROTSHM = 0x00000040,
52625 + GR_KILLPROC = 0x00000080,
52626 + GR_KILLIPPROC = 0x00000100,
52627 + /* just a placeholder, this mode is only used in userspace */
52628 + GR_NOTROJAN = 0x00000200,
52629 + GR_PROTPROCFD = 0x00000400,
52630 + GR_PROCACCT = 0x00000800,
52631 + GR_RELAXPTRACE = 0x00001000,
52632 + GR_NESTED = 0x00002000,
52633 + GR_INHERITLEARN = 0x00004000,
52634 + GR_PROCFIND = 0x00008000,
52635 + GR_POVERRIDE = 0x00010000,
52636 + GR_KERNELAUTH = 0x00020000,
52637 + GR_ATSECURE = 0x00040000,
52638 + GR_SHMEXEC = 0x00080000
52639 +};
52640 +
52641 +enum {
52642 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52643 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52644 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52645 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52646 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52647 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52648 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52649 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52650 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52651 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52652 +};
52653 +
52654 +enum {
52655 + GR_ID_USER = 0x01,
52656 + GR_ID_GROUP = 0x02,
52657 +};
52658 +
52659 +enum {
52660 + GR_ID_ALLOW = 0x01,
52661 + GR_ID_DENY = 0x02,
52662 +};
52663 +
52664 +#define GR_CRASH_RES 31
52665 +#define GR_UIDTABLE_MAX 500
52666 +
52667 +/* begin resource learning section */
52668 +enum {
52669 + GR_RLIM_CPU_BUMP = 60,
52670 + GR_RLIM_FSIZE_BUMP = 50000,
52671 + GR_RLIM_DATA_BUMP = 10000,
52672 + GR_RLIM_STACK_BUMP = 1000,
52673 + GR_RLIM_CORE_BUMP = 10000,
52674 + GR_RLIM_RSS_BUMP = 500000,
52675 + GR_RLIM_NPROC_BUMP = 1,
52676 + GR_RLIM_NOFILE_BUMP = 5,
52677 + GR_RLIM_MEMLOCK_BUMP = 50000,
52678 + GR_RLIM_AS_BUMP = 500000,
52679 + GR_RLIM_LOCKS_BUMP = 2,
52680 + GR_RLIM_SIGPENDING_BUMP = 5,
52681 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52682 + GR_RLIM_NICE_BUMP = 1,
52683 + GR_RLIM_RTPRIO_BUMP = 1,
52684 + GR_RLIM_RTTIME_BUMP = 1000000
52685 +};
52686 +
52687 +#endif
52688 diff -urNp linux-2.6.39.4/include/linux/grinternal.h linux-2.6.39.4/include/linux/grinternal.h
52689 --- linux-2.6.39.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52690 +++ linux-2.6.39.4/include/linux/grinternal.h 2011-08-05 19:44:37.000000000 -0400
52691 @@ -0,0 +1,219 @@
52692 +#ifndef __GRINTERNAL_H
52693 +#define __GRINTERNAL_H
52694 +
52695 +#ifdef CONFIG_GRKERNSEC
52696 +
52697 +#include <linux/fs.h>
52698 +#include <linux/mnt_namespace.h>
52699 +#include <linux/nsproxy.h>
52700 +#include <linux/gracl.h>
52701 +#include <linux/grdefs.h>
52702 +#include <linux/grmsg.h>
52703 +
52704 +void gr_add_learn_entry(const char *fmt, ...)
52705 + __attribute__ ((format (printf, 1, 2)));
52706 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52707 + const struct vfsmount *mnt);
52708 +__u32 gr_check_create(const struct dentry *new_dentry,
52709 + const struct dentry *parent,
52710 + const struct vfsmount *mnt, const __u32 mode);
52711 +int gr_check_protected_task(const struct task_struct *task);
52712 +__u32 to_gr_audit(const __u32 reqmode);
52713 +int gr_set_acls(const int type);
52714 +int gr_apply_subject_to_task(struct task_struct *task);
52715 +int gr_acl_is_enabled(void);
52716 +char gr_roletype_to_char(void);
52717 +
52718 +void gr_handle_alertkill(struct task_struct *task);
52719 +char *gr_to_filename(const struct dentry *dentry,
52720 + const struct vfsmount *mnt);
52721 +char *gr_to_filename1(const struct dentry *dentry,
52722 + const struct vfsmount *mnt);
52723 +char *gr_to_filename2(const struct dentry *dentry,
52724 + const struct vfsmount *mnt);
52725 +char *gr_to_filename3(const struct dentry *dentry,
52726 + const struct vfsmount *mnt);
52727 +
52728 +extern int grsec_enable_harden_ptrace;
52729 +extern int grsec_enable_link;
52730 +extern int grsec_enable_fifo;
52731 +extern int grsec_enable_execve;
52732 +extern int grsec_enable_shm;
52733 +extern int grsec_enable_execlog;
52734 +extern int grsec_enable_signal;
52735 +extern int grsec_enable_audit_ptrace;
52736 +extern int grsec_enable_forkfail;
52737 +extern int grsec_enable_time;
52738 +extern int grsec_enable_rofs;
52739 +extern int grsec_enable_chroot_shmat;
52740 +extern int grsec_enable_chroot_mount;
52741 +extern int grsec_enable_chroot_double;
52742 +extern int grsec_enable_chroot_pivot;
52743 +extern int grsec_enable_chroot_chdir;
52744 +extern int grsec_enable_chroot_chmod;
52745 +extern int grsec_enable_chroot_mknod;
52746 +extern int grsec_enable_chroot_fchdir;
52747 +extern int grsec_enable_chroot_nice;
52748 +extern int grsec_enable_chroot_execlog;
52749 +extern int grsec_enable_chroot_caps;
52750 +extern int grsec_enable_chroot_sysctl;
52751 +extern int grsec_enable_chroot_unix;
52752 +extern int grsec_enable_tpe;
52753 +extern int grsec_tpe_gid;
52754 +extern int grsec_enable_tpe_all;
52755 +extern int grsec_enable_tpe_invert;
52756 +extern int grsec_enable_socket_all;
52757 +extern int grsec_socket_all_gid;
52758 +extern int grsec_enable_socket_client;
52759 +extern int grsec_socket_client_gid;
52760 +extern int grsec_enable_socket_server;
52761 +extern int grsec_socket_server_gid;
52762 +extern int grsec_audit_gid;
52763 +extern int grsec_enable_group;
52764 +extern int grsec_enable_audit_textrel;
52765 +extern int grsec_enable_log_rwxmaps;
52766 +extern int grsec_enable_mount;
52767 +extern int grsec_enable_chdir;
52768 +extern int grsec_resource_logging;
52769 +extern int grsec_enable_blackhole;
52770 +extern int grsec_lastack_retries;
52771 +extern int grsec_enable_brute;
52772 +extern int grsec_lock;
52773 +
52774 +extern spinlock_t grsec_alert_lock;
52775 +extern unsigned long grsec_alert_wtime;
52776 +extern unsigned long grsec_alert_fyet;
52777 +
52778 +extern spinlock_t grsec_audit_lock;
52779 +
52780 +extern rwlock_t grsec_exec_file_lock;
52781 +
52782 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52783 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52784 + (tsk)->exec_file->f_vfsmnt) : "/")
52785 +
52786 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52787 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52788 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52789 +
52790 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52791 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52792 + (tsk)->exec_file->f_vfsmnt) : "/")
52793 +
52794 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52795 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52796 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52797 +
52798 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52799 +
52800 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52801 +
52802 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52803 + (task)->pid, (cred)->uid, \
52804 + (cred)->euid, (cred)->gid, (cred)->egid, \
52805 + gr_parent_task_fullpath(task), \
52806 + (task)->real_parent->comm, (task)->real_parent->pid, \
52807 + (pcred)->uid, (pcred)->euid, \
52808 + (pcred)->gid, (pcred)->egid
52809 +
52810 +#define GR_CHROOT_CAPS {{ \
52811 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52812 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52813 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52814 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52815 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52816 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52817 +
52818 +#define security_learn(normal_msg,args...) \
52819 +({ \
52820 + read_lock(&grsec_exec_file_lock); \
52821 + gr_add_learn_entry(normal_msg "\n", ## args); \
52822 + read_unlock(&grsec_exec_file_lock); \
52823 +})
52824 +
52825 +enum {
52826 + GR_DO_AUDIT,
52827 + GR_DONT_AUDIT,
52828 + /* used for non-audit messages that we shouldn't kill the task on */
52829 + GR_DONT_AUDIT_GOOD
52830 +};
52831 +
52832 +enum {
52833 + GR_TTYSNIFF,
52834 + GR_RBAC,
52835 + GR_RBAC_STR,
52836 + GR_STR_RBAC,
52837 + GR_RBAC_MODE2,
52838 + GR_RBAC_MODE3,
52839 + GR_FILENAME,
52840 + GR_SYSCTL_HIDDEN,
52841 + GR_NOARGS,
52842 + GR_ONE_INT,
52843 + GR_ONE_INT_TWO_STR,
52844 + GR_ONE_STR,
52845 + GR_STR_INT,
52846 + GR_TWO_STR_INT,
52847 + GR_TWO_INT,
52848 + GR_TWO_U64,
52849 + GR_THREE_INT,
52850 + GR_FIVE_INT_TWO_STR,
52851 + GR_TWO_STR,
52852 + GR_THREE_STR,
52853 + GR_FOUR_STR,
52854 + GR_STR_FILENAME,
52855 + GR_FILENAME_STR,
52856 + GR_FILENAME_TWO_INT,
52857 + GR_FILENAME_TWO_INT_STR,
52858 + GR_TEXTREL,
52859 + GR_PTRACE,
52860 + GR_RESOURCE,
52861 + GR_CAP,
52862 + GR_SIG,
52863 + GR_SIG2,
52864 + GR_CRASH1,
52865 + GR_CRASH2,
52866 + GR_PSACCT,
52867 + GR_RWXMAP
52868 +};
52869 +
52870 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52871 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52872 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52873 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52874 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52875 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52876 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52877 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52878 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52879 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52880 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52881 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52882 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52883 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52884 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52885 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52886 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52887 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52888 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52889 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52890 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52891 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52892 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52893 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52894 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52895 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52896 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52897 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52898 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52899 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52900 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52901 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52902 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52903 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52904 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52905 +
52906 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52907 +
52908 +#endif
52909 +
52910 +#endif
52911 diff -urNp linux-2.6.39.4/include/linux/grmsg.h linux-2.6.39.4/include/linux/grmsg.h
52912 --- linux-2.6.39.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52913 +++ linux-2.6.39.4/include/linux/grmsg.h 2011-08-05 19:44:37.000000000 -0400
52914 @@ -0,0 +1,108 @@
52915 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52916 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52917 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52918 +#define GR_STOPMOD_MSG "denied modification of module state by "
52919 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52920 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52921 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52922 +#define GR_IOPL_MSG "denied use of iopl() by "
52923 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52924 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52925 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52926 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52927 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52928 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52929 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52930 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52931 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52932 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52933 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52934 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52935 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52936 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52937 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52938 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52939 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52940 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52941 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52942 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52943 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52944 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52945 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52946 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52947 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52948 +#define GR_NPROC_MSG "denied overstep of process limit by "
52949 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52950 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52951 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52952 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52953 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52954 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52955 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52956 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52957 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52958 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52959 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52960 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52961 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52962 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52963 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52964 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52965 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52966 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52967 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52968 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52969 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52970 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52971 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52972 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52973 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52974 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52975 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52976 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52977 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52978 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52979 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52980 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52981 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52982 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52983 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52984 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52985 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52986 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52987 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52988 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52989 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52990 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52991 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52992 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52993 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52994 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52995 +#define GR_TIME_MSG "time set by "
52996 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52997 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52998 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52999 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
53000 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
53001 +#define GR_BIND_MSG "denied bind() by "
53002 +#define GR_CONNECT_MSG "denied connect() by "
53003 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
53004 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
53005 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
53006 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
53007 +#define GR_CAP_ACL_MSG "use of %s denied for "
53008 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
53009 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
53010 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
53011 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
53012 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
53013 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
53014 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
53015 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
53016 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
53017 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
53018 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
53019 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
53020 +#define GR_VM86_MSG "denied use of vm86 by "
53021 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
53022 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
53023 diff -urNp linux-2.6.39.4/include/linux/grsecurity.h linux-2.6.39.4/include/linux/grsecurity.h
53024 --- linux-2.6.39.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
53025 +++ linux-2.6.39.4/include/linux/grsecurity.h 2011-08-05 19:54:17.000000000 -0400
53026 @@ -0,0 +1,218 @@
53027 +#ifndef GR_SECURITY_H
53028 +#define GR_SECURITY_H
53029 +#include <linux/fs.h>
53030 +#include <linux/fs_struct.h>
53031 +#include <linux/binfmts.h>
53032 +#include <linux/gracl.h>
53033 +#include <linux/compat.h>
53034 +
53035 +/* notify of brain-dead configs */
53036 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53037 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
53038 +#endif
53039 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
53040 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
53041 +#endif
53042 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53043 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53044 +#endif
53045 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53046 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53047 +#endif
53048 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
53049 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
53050 +#endif
53051 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
53052 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
53053 +#endif
53054 +
53055 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
53056 +void gr_handle_brute_check(void);
53057 +void gr_handle_kernel_exploit(void);
53058 +int gr_process_user_ban(void);
53059 +
53060 +char gr_roletype_to_char(void);
53061 +
53062 +int gr_acl_enable_at_secure(void);
53063 +
53064 +int gr_check_user_change(int real, int effective, int fs);
53065 +int gr_check_group_change(int real, int effective, int fs);
53066 +
53067 +void gr_del_task_from_ip_table(struct task_struct *p);
53068 +
53069 +int gr_pid_is_chrooted(struct task_struct *p);
53070 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
53071 +int gr_handle_chroot_nice(void);
53072 +int gr_handle_chroot_sysctl(const int op);
53073 +int gr_handle_chroot_setpriority(struct task_struct *p,
53074 + const int niceval);
53075 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
53076 +int gr_handle_chroot_chroot(const struct dentry *dentry,
53077 + const struct vfsmount *mnt);
53078 +int gr_handle_chroot_caps(struct path *path);
53079 +void gr_handle_chroot_chdir(struct path *path);
53080 +int gr_handle_chroot_chmod(const struct dentry *dentry,
53081 + const struct vfsmount *mnt, const int mode);
53082 +int gr_handle_chroot_mknod(const struct dentry *dentry,
53083 + const struct vfsmount *mnt, const int mode);
53084 +int gr_handle_chroot_mount(const struct dentry *dentry,
53085 + const struct vfsmount *mnt,
53086 + const char *dev_name);
53087 +int gr_handle_chroot_pivot(void);
53088 +int gr_handle_chroot_unix(const pid_t pid);
53089 +
53090 +int gr_handle_rawio(const struct inode *inode);
53091 +int gr_handle_nproc(void);
53092 +
53093 +void gr_handle_ioperm(void);
53094 +void gr_handle_iopl(void);
53095 +
53096 +int gr_tpe_allow(const struct file *file);
53097 +
53098 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
53099 +void gr_clear_chroot_entries(struct task_struct *task);
53100 +
53101 +void gr_log_forkfail(const int retval);
53102 +void gr_log_timechange(void);
53103 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
53104 +void gr_log_chdir(const struct dentry *dentry,
53105 + const struct vfsmount *mnt);
53106 +void gr_log_chroot_exec(const struct dentry *dentry,
53107 + const struct vfsmount *mnt);
53108 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
53109 +#ifdef CONFIG_COMPAT
53110 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
53111 +#endif
53112 +void gr_log_remount(const char *devname, const int retval);
53113 +void gr_log_unmount(const char *devname, const int retval);
53114 +void gr_log_mount(const char *from, const char *to, const int retval);
53115 +void gr_log_textrel(struct vm_area_struct *vma);
53116 +void gr_log_rwxmmap(struct file *file);
53117 +void gr_log_rwxmprotect(struct file *file);
53118 +
53119 +int gr_handle_follow_link(const struct inode *parent,
53120 + const struct inode *inode,
53121 + const struct dentry *dentry,
53122 + const struct vfsmount *mnt);
53123 +int gr_handle_fifo(const struct dentry *dentry,
53124 + const struct vfsmount *mnt,
53125 + const struct dentry *dir, const int flag,
53126 + const int acc_mode);
53127 +int gr_handle_hardlink(const struct dentry *dentry,
53128 + const struct vfsmount *mnt,
53129 + struct inode *inode,
53130 + const int mode, const char *to);
53131 +
53132 +int gr_is_capable(const int cap);
53133 +int gr_is_capable_nolog(const int cap);
53134 +void gr_learn_resource(const struct task_struct *task, const int limit,
53135 + const unsigned long wanted, const int gt);
53136 +void gr_copy_label(struct task_struct *tsk);
53137 +void gr_handle_crash(struct task_struct *task, const int sig);
53138 +int gr_handle_signal(const struct task_struct *p, const int sig);
53139 +int gr_check_crash_uid(const uid_t uid);
53140 +int gr_check_protected_task(const struct task_struct *task);
53141 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
53142 +int gr_acl_handle_mmap(const struct file *file,
53143 + const unsigned long prot);
53144 +int gr_acl_handle_mprotect(const struct file *file,
53145 + const unsigned long prot);
53146 +int gr_check_hidden_task(const struct task_struct *tsk);
53147 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
53148 + const struct vfsmount *mnt);
53149 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
53150 + const struct vfsmount *mnt);
53151 +__u32 gr_acl_handle_access(const struct dentry *dentry,
53152 + const struct vfsmount *mnt, const int fmode);
53153 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
53154 + const struct vfsmount *mnt, mode_t mode);
53155 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
53156 + const struct vfsmount *mnt, mode_t mode);
53157 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
53158 + const struct vfsmount *mnt);
53159 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
53160 + const struct vfsmount *mnt);
53161 +int gr_handle_ptrace(struct task_struct *task, const long request);
53162 +int gr_handle_proc_ptrace(struct task_struct *task);
53163 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
53164 + const struct vfsmount *mnt);
53165 +int gr_check_crash_exec(const struct file *filp);
53166 +int gr_acl_is_enabled(void);
53167 +void gr_set_kernel_label(struct task_struct *task);
53168 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
53169 + const gid_t gid);
53170 +int gr_set_proc_label(const struct dentry *dentry,
53171 + const struct vfsmount *mnt,
53172 + const int unsafe_share);
53173 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
53174 + const struct vfsmount *mnt);
53175 +__u32 gr_acl_handle_open(const struct dentry *dentry,
53176 + const struct vfsmount *mnt, const int fmode);
53177 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
53178 + const struct dentry *p_dentry,
53179 + const struct vfsmount *p_mnt, const int fmode,
53180 + const int imode);
53181 +void gr_handle_create(const struct dentry *dentry,
53182 + const struct vfsmount *mnt);
53183 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
53184 + const struct dentry *parent_dentry,
53185 + const struct vfsmount *parent_mnt,
53186 + const int mode);
53187 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53188 + const struct dentry *parent_dentry,
53189 + const struct vfsmount *parent_mnt);
53190 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53191 + const struct vfsmount *mnt);
53192 +void gr_handle_delete(const ino_t ino, const dev_t dev);
53193 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53194 + const struct vfsmount *mnt);
53195 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53196 + const struct dentry *parent_dentry,
53197 + const struct vfsmount *parent_mnt,
53198 + const char *from);
53199 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53200 + const struct dentry *parent_dentry,
53201 + const struct vfsmount *parent_mnt,
53202 + const struct dentry *old_dentry,
53203 + const struct vfsmount *old_mnt, const char *to);
53204 +int gr_acl_handle_rename(struct dentry *new_dentry,
53205 + struct dentry *parent_dentry,
53206 + const struct vfsmount *parent_mnt,
53207 + struct dentry *old_dentry,
53208 + struct inode *old_parent_inode,
53209 + struct vfsmount *old_mnt, const char *newname);
53210 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53211 + struct dentry *old_dentry,
53212 + struct dentry *new_dentry,
53213 + struct vfsmount *mnt, const __u8 replace);
53214 +__u32 gr_check_link(const struct dentry *new_dentry,
53215 + const struct dentry *parent_dentry,
53216 + const struct vfsmount *parent_mnt,
53217 + const struct dentry *old_dentry,
53218 + const struct vfsmount *old_mnt);
53219 +int gr_acl_handle_filldir(const struct file *file, const char *name,
53220 + const unsigned int namelen, const ino_t ino);
53221 +
53222 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
53223 + const struct vfsmount *mnt);
53224 +void gr_acl_handle_exit(void);
53225 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
53226 +int gr_acl_handle_procpidmem(const struct task_struct *task);
53227 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53228 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53229 +void gr_audit_ptrace(struct task_struct *task);
53230 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53231 +
53232 +#ifdef CONFIG_GRKERNSEC
53233 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53234 +void gr_handle_vm86(void);
53235 +void gr_handle_mem_readwrite(u64 from, u64 to);
53236 +
53237 +extern int grsec_enable_dmesg;
53238 +extern int grsec_disable_privio;
53239 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53240 +extern int grsec_enable_chroot_findtask;
53241 +#endif
53242 +#endif
53243 +
53244 +#endif
53245 diff -urNp linux-2.6.39.4/include/linux/grsock.h linux-2.6.39.4/include/linux/grsock.h
53246 --- linux-2.6.39.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53247 +++ linux-2.6.39.4/include/linux/grsock.h 2011-08-05 19:44:37.000000000 -0400
53248 @@ -0,0 +1,19 @@
53249 +#ifndef __GRSOCK_H
53250 +#define __GRSOCK_H
53251 +
53252 +extern void gr_attach_curr_ip(const struct sock *sk);
53253 +extern int gr_handle_sock_all(const int family, const int type,
53254 + const int protocol);
53255 +extern int gr_handle_sock_server(const struct sockaddr *sck);
53256 +extern int gr_handle_sock_server_other(const struct sock *sck);
53257 +extern int gr_handle_sock_client(const struct sockaddr *sck);
53258 +extern int gr_search_connect(struct socket * sock,
53259 + struct sockaddr_in * addr);
53260 +extern int gr_search_bind(struct socket * sock,
53261 + struct sockaddr_in * addr);
53262 +extern int gr_search_listen(struct socket * sock);
53263 +extern int gr_search_accept(struct socket * sock);
53264 +extern int gr_search_socket(const int domain, const int type,
53265 + const int protocol);
53266 +
53267 +#endif
53268 diff -urNp linux-2.6.39.4/include/linux/highmem.h linux-2.6.39.4/include/linux/highmem.h
53269 --- linux-2.6.39.4/include/linux/highmem.h 2011-05-19 00:06:34.000000000 -0400
53270 +++ linux-2.6.39.4/include/linux/highmem.h 2011-08-05 19:44:37.000000000 -0400
53271 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53272 kunmap_atomic(kaddr, KM_USER0);
53273 }
53274
53275 +static inline void sanitize_highpage(struct page *page)
53276 +{
53277 + void *kaddr;
53278 + unsigned long flags;
53279 +
53280 + local_irq_save(flags);
53281 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
53282 + clear_page(kaddr);
53283 + kunmap_atomic(kaddr, KM_CLEARPAGE);
53284 + local_irq_restore(flags);
53285 +}
53286 +
53287 static inline void zero_user_segments(struct page *page,
53288 unsigned start1, unsigned end1,
53289 unsigned start2, unsigned end2)
53290 diff -urNp linux-2.6.39.4/include/linux/i2c.h linux-2.6.39.4/include/linux/i2c.h
53291 --- linux-2.6.39.4/include/linux/i2c.h 2011-05-19 00:06:34.000000000 -0400
53292 +++ linux-2.6.39.4/include/linux/i2c.h 2011-08-05 20:34:06.000000000 -0400
53293 @@ -346,6 +346,7 @@ struct i2c_algorithm {
53294 /* To determine what the adapter supports */
53295 u32 (*functionality) (struct i2c_adapter *);
53296 };
53297 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53298
53299 /*
53300 * i2c_adapter is the structure used to identify a physical i2c bus along
53301 diff -urNp linux-2.6.39.4/include/linux/i2o.h linux-2.6.39.4/include/linux/i2o.h
53302 --- linux-2.6.39.4/include/linux/i2o.h 2011-05-19 00:06:34.000000000 -0400
53303 +++ linux-2.6.39.4/include/linux/i2o.h 2011-08-05 19:44:37.000000000 -0400
53304 @@ -564,7 +564,7 @@ struct i2o_controller {
53305 struct i2o_device *exec; /* Executive */
53306 #if BITS_PER_LONG == 64
53307 spinlock_t context_list_lock; /* lock for context_list */
53308 - atomic_t context_list_counter; /* needed for unique contexts */
53309 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53310 struct list_head context_list; /* list of context id's
53311 and pointers */
53312 #endif
53313 diff -urNp linux-2.6.39.4/include/linux/init.h linux-2.6.39.4/include/linux/init.h
53314 --- linux-2.6.39.4/include/linux/init.h 2011-05-19 00:06:34.000000000 -0400
53315 +++ linux-2.6.39.4/include/linux/init.h 2011-08-05 19:44:37.000000000 -0400
53316 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53317
53318 /* Each module must use one module_init(). */
53319 #define module_init(initfn) \
53320 - static inline initcall_t __inittest(void) \
53321 + static inline __used initcall_t __inittest(void) \
53322 { return initfn; } \
53323 int init_module(void) __attribute__((alias(#initfn)));
53324
53325 /* This is only required if you want to be unloadable. */
53326 #define module_exit(exitfn) \
53327 - static inline exitcall_t __exittest(void) \
53328 + static inline __used exitcall_t __exittest(void) \
53329 { return exitfn; } \
53330 void cleanup_module(void) __attribute__((alias(#exitfn)));
53331
53332 diff -urNp linux-2.6.39.4/include/linux/init_task.h linux-2.6.39.4/include/linux/init_task.h
53333 --- linux-2.6.39.4/include/linux/init_task.h 2011-05-19 00:06:34.000000000 -0400
53334 +++ linux-2.6.39.4/include/linux/init_task.h 2011-08-05 19:44:37.000000000 -0400
53335 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
53336 #define INIT_IDS
53337 #endif
53338
53339 +#ifdef CONFIG_X86
53340 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53341 +#else
53342 +#define INIT_TASK_THREAD_INFO
53343 +#endif
53344 +
53345 /*
53346 * Because of the reduced scope of CAP_SETPCAP when filesystem
53347 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
53348 @@ -163,6 +169,7 @@ extern struct cred init_cred;
53349 RCU_INIT_POINTER(.cred, &init_cred), \
53350 .comm = "swapper", \
53351 .thread = INIT_THREAD, \
53352 + INIT_TASK_THREAD_INFO \
53353 .fs = &init_fs, \
53354 .files = &init_files, \
53355 .signal = &init_signals, \
53356 diff -urNp linux-2.6.39.4/include/linux/intel-iommu.h linux-2.6.39.4/include/linux/intel-iommu.h
53357 --- linux-2.6.39.4/include/linux/intel-iommu.h 2011-05-19 00:06:34.000000000 -0400
53358 +++ linux-2.6.39.4/include/linux/intel-iommu.h 2011-08-05 20:34:06.000000000 -0400
53359 @@ -296,7 +296,7 @@ struct iommu_flush {
53360 u8 fm, u64 type);
53361 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53362 unsigned int size_order, u64 type);
53363 -};
53364 +} __no_const;
53365
53366 enum {
53367 SR_DMAR_FECTL_REG,
53368 diff -urNp linux-2.6.39.4/include/linux/interrupt.h linux-2.6.39.4/include/linux/interrupt.h
53369 --- linux-2.6.39.4/include/linux/interrupt.h 2011-05-19 00:06:34.000000000 -0400
53370 +++ linux-2.6.39.4/include/linux/interrupt.h 2011-08-05 19:44:37.000000000 -0400
53371 @@ -422,7 +422,7 @@ enum
53372 /* map softirq index to softirq name. update 'softirq_to_name' in
53373 * kernel/softirq.c when adding a new softirq.
53374 */
53375 -extern char *softirq_to_name[NR_SOFTIRQS];
53376 +extern const char * const softirq_to_name[NR_SOFTIRQS];
53377
53378 /* softirq mask and active fields moved to irq_cpustat_t in
53379 * asm/hardirq.h to get better cache usage. KAO
53380 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53381
53382 struct softirq_action
53383 {
53384 - void (*action)(struct softirq_action *);
53385 + void (*action)(void);
53386 };
53387
53388 asmlinkage void do_softirq(void);
53389 asmlinkage void __do_softirq(void);
53390 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53391 +extern void open_softirq(int nr, void (*action)(void));
53392 extern void softirq_init(void);
53393 static inline void __raise_softirq_irqoff(unsigned int nr)
53394 {
53395 diff -urNp linux-2.6.39.4/include/linux/kallsyms.h linux-2.6.39.4/include/linux/kallsyms.h
53396 --- linux-2.6.39.4/include/linux/kallsyms.h 2011-05-19 00:06:34.000000000 -0400
53397 +++ linux-2.6.39.4/include/linux/kallsyms.h 2011-08-05 19:44:37.000000000 -0400
53398 @@ -15,7 +15,8 @@
53399
53400 struct module;
53401
53402 -#ifdef CONFIG_KALLSYMS
53403 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53404 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53405 /* Lookup the address for a symbol. Returns 0 if not found. */
53406 unsigned long kallsyms_lookup_name(const char *name);
53407
53408 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53409 /* Stupid that this does nothing, but I didn't create this mess. */
53410 #define __print_symbol(fmt, addr)
53411 #endif /*CONFIG_KALLSYMS*/
53412 +#else /* when included by kallsyms.c, vsnprintf.c, or
53413 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53414 +extern void __print_symbol(const char *fmt, unsigned long address);
53415 +extern int sprint_backtrace(char *buffer, unsigned long address);
53416 +extern int sprint_symbol(char *buffer, unsigned long address);
53417 +const char *kallsyms_lookup(unsigned long addr,
53418 + unsigned long *symbolsize,
53419 + unsigned long *offset,
53420 + char **modname, char *namebuf);
53421 +#endif
53422
53423 /* This macro allows us to keep printk typechecking */
53424 static void __check_printsym_format(const char *fmt, ...)
53425 diff -urNp linux-2.6.39.4/include/linux/kgdb.h linux-2.6.39.4/include/linux/kgdb.h
53426 --- linux-2.6.39.4/include/linux/kgdb.h 2011-05-19 00:06:34.000000000 -0400
53427 +++ linux-2.6.39.4/include/linux/kgdb.h 2011-08-05 20:34:06.000000000 -0400
53428 @@ -53,7 +53,7 @@ extern int kgdb_connected;
53429 extern int kgdb_io_module_registered;
53430
53431 extern atomic_t kgdb_setting_breakpoint;
53432 -extern atomic_t kgdb_cpu_doing_single_step;
53433 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53434
53435 extern struct task_struct *kgdb_usethread;
53436 extern struct task_struct *kgdb_contthread;
53437 @@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53438 * hardware debug registers.
53439 */
53440 struct kgdb_arch {
53441 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53442 - unsigned long flags;
53443 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53444 + const unsigned long flags;
53445
53446 int (*set_breakpoint)(unsigned long, char *);
53447 int (*remove_breakpoint)(unsigned long, char *);
53448 @@ -268,14 +268,14 @@ struct kgdb_arch {
53449 * not a console
53450 */
53451 struct kgdb_io {
53452 - const char *name;
53453 + const char * const name;
53454 int (*read_char) (void);
53455 void (*write_char) (u8);
53456 void (*flush) (void);
53457 int (*init) (void);
53458 void (*pre_exception) (void);
53459 void (*post_exception) (void);
53460 - int is_console;
53461 + const int is_console;
53462 };
53463
53464 extern struct kgdb_arch arch_kgdb_ops;
53465 diff -urNp linux-2.6.39.4/include/linux/kmod.h linux-2.6.39.4/include/linux/kmod.h
53466 --- linux-2.6.39.4/include/linux/kmod.h 2011-05-19 00:06:34.000000000 -0400
53467 +++ linux-2.6.39.4/include/linux/kmod.h 2011-08-05 19:44:37.000000000 -0400
53468 @@ -33,6 +33,8 @@ extern char modprobe_path[]; /* for sysc
53469 * usually useless though. */
53470 extern int __request_module(bool wait, const char *name, ...) \
53471 __attribute__((format(printf, 2, 3)));
53472 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53473 + __attribute__((format(printf, 3, 4)));
53474 #define request_module(mod...) __request_module(true, mod)
53475 #define request_module_nowait(mod...) __request_module(false, mod)
53476 #define try_then_request_module(x, mod...) \
53477 diff -urNp linux-2.6.39.4/include/linux/kvm_host.h linux-2.6.39.4/include/linux/kvm_host.h
53478 --- linux-2.6.39.4/include/linux/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
53479 +++ linux-2.6.39.4/include/linux/kvm_host.h 2011-08-05 19:44:37.000000000 -0400
53480 @@ -302,7 +302,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53481 void vcpu_load(struct kvm_vcpu *vcpu);
53482 void vcpu_put(struct kvm_vcpu *vcpu);
53483
53484 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53485 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53486 struct module *module);
53487 void kvm_exit(void);
53488
53489 @@ -442,7 +442,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53490 struct kvm_guest_debug *dbg);
53491 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53492
53493 -int kvm_arch_init(void *opaque);
53494 +int kvm_arch_init(const void *opaque);
53495 void kvm_arch_exit(void);
53496
53497 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53498 diff -urNp linux-2.6.39.4/include/linux/libata.h linux-2.6.39.4/include/linux/libata.h
53499 --- linux-2.6.39.4/include/linux/libata.h 2011-05-19 00:06:34.000000000 -0400
53500 +++ linux-2.6.39.4/include/linux/libata.h 2011-08-05 20:34:06.000000000 -0400
53501 @@ -898,7 +898,7 @@ struct ata_port_operations {
53502 * ->inherits must be the last field and all the preceding
53503 * fields must be pointers.
53504 */
53505 - const struct ata_port_operations *inherits;
53506 + const struct ata_port_operations * const inherits;
53507 };
53508
53509 struct ata_port_info {
53510 diff -urNp linux-2.6.39.4/include/linux/mca.h linux-2.6.39.4/include/linux/mca.h
53511 --- linux-2.6.39.4/include/linux/mca.h 2011-05-19 00:06:34.000000000 -0400
53512 +++ linux-2.6.39.4/include/linux/mca.h 2011-08-05 20:34:06.000000000 -0400
53513 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53514 int region);
53515 void * (*mca_transform_memory)(struct mca_device *,
53516 void *memory);
53517 -};
53518 +} __no_const;
53519
53520 struct mca_bus {
53521 u64 default_dma_mask;
53522 diff -urNp linux-2.6.39.4/include/linux/memory.h linux-2.6.39.4/include/linux/memory.h
53523 --- linux-2.6.39.4/include/linux/memory.h 2011-05-19 00:06:34.000000000 -0400
53524 +++ linux-2.6.39.4/include/linux/memory.h 2011-08-05 20:34:06.000000000 -0400
53525 @@ -142,7 +142,7 @@ struct memory_accessor {
53526 size_t count);
53527 ssize_t (*write)(struct memory_accessor *, const char *buf,
53528 off_t offset, size_t count);
53529 -};
53530 +} __no_const;
53531
53532 /*
53533 * Kernel text modification mutex, used for code patching. Users of this lock
53534 diff -urNp linux-2.6.39.4/include/linux/mfd/abx500.h linux-2.6.39.4/include/linux/mfd/abx500.h
53535 --- linux-2.6.39.4/include/linux/mfd/abx500.h 2011-05-19 00:06:34.000000000 -0400
53536 +++ linux-2.6.39.4/include/linux/mfd/abx500.h 2011-08-05 20:34:06.000000000 -0400
53537 @@ -226,6 +226,7 @@ struct abx500_ops {
53538 int (*event_registers_startup_state_get) (struct device *, u8 *);
53539 int (*startup_irq_enabled) (struct device *, unsigned int);
53540 };
53541 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53542
53543 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53544 void abx500_remove_ops(struct device *dev);
53545 diff -urNp linux-2.6.39.4/include/linux/mm.h linux-2.6.39.4/include/linux/mm.h
53546 --- linux-2.6.39.4/include/linux/mm.h 2011-05-19 00:06:34.000000000 -0400
53547 +++ linux-2.6.39.4/include/linux/mm.h 2011-08-05 19:44:37.000000000 -0400
53548 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53549
53550 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53551 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53552 +
53553 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53554 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53555 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53556 +#else
53557 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53558 +#endif
53559 +
53560 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53561 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53562
53563 @@ -1010,34 +1017,6 @@ int set_page_dirty(struct page *page);
53564 int set_page_dirty_lock(struct page *page);
53565 int clear_page_dirty_for_io(struct page *page);
53566
53567 -/* Is the vma a continuation of the stack vma above it? */
53568 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53569 -{
53570 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53571 -}
53572 -
53573 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53574 - unsigned long addr)
53575 -{
53576 - return (vma->vm_flags & VM_GROWSDOWN) &&
53577 - (vma->vm_start == addr) &&
53578 - !vma_growsdown(vma->vm_prev, addr);
53579 -}
53580 -
53581 -/* Is the vma a continuation of the stack vma below it? */
53582 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53583 -{
53584 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53585 -}
53586 -
53587 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53588 - unsigned long addr)
53589 -{
53590 - return (vma->vm_flags & VM_GROWSUP) &&
53591 - (vma->vm_end == addr) &&
53592 - !vma_growsup(vma->vm_next, addr);
53593 -}
53594 -
53595 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53596 unsigned long old_addr, struct vm_area_struct *new_vma,
53597 unsigned long new_addr, unsigned long len);
53598 @@ -1189,6 +1168,15 @@ struct shrinker {
53599 extern void register_shrinker(struct shrinker *);
53600 extern void unregister_shrinker(struct shrinker *);
53601
53602 +#ifdef CONFIG_MMU
53603 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
53604 +#else
53605 +static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53606 +{
53607 + return __pgprot(0);
53608 +}
53609 +#endif
53610 +
53611 int vma_wants_writenotify(struct vm_area_struct *vma);
53612
53613 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53614 @@ -1476,6 +1464,7 @@ out:
53615 }
53616
53617 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53618 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53619
53620 extern unsigned long do_brk(unsigned long, unsigned long);
53621
53622 @@ -1532,6 +1521,10 @@ extern struct vm_area_struct * find_vma(
53623 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53624 struct vm_area_struct **pprev);
53625
53626 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53627 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53628 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53629 +
53630 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53631 NULL if none. Assume start_addr < end_addr. */
53632 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53633 @@ -1548,15 +1541,6 @@ static inline unsigned long vma_pages(st
53634 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53635 }
53636
53637 -#ifdef CONFIG_MMU
53638 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53639 -#else
53640 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53641 -{
53642 - return __pgprot(0);
53643 -}
53644 -#endif
53645 -
53646 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53647 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53648 unsigned long pfn, unsigned long size, pgprot_t);
53649 @@ -1668,7 +1652,7 @@ extern int unpoison_memory(unsigned long
53650 extern int sysctl_memory_failure_early_kill;
53651 extern int sysctl_memory_failure_recovery;
53652 extern void shake_page(struct page *p, int access);
53653 -extern atomic_long_t mce_bad_pages;
53654 +extern atomic_long_unchecked_t mce_bad_pages;
53655 extern int soft_offline_page(struct page *page, int flags);
53656
53657 extern void dump_page(struct page *page);
53658 @@ -1682,5 +1666,11 @@ extern void copy_user_huge_page(struct p
53659 unsigned int pages_per_huge_page);
53660 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53661
53662 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53663 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53664 +#else
53665 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53666 +#endif
53667 +
53668 #endif /* __KERNEL__ */
53669 #endif /* _LINUX_MM_H */
53670 diff -urNp linux-2.6.39.4/include/linux/mm_types.h linux-2.6.39.4/include/linux/mm_types.h
53671 --- linux-2.6.39.4/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
53672 +++ linux-2.6.39.4/include/linux/mm_types.h 2011-08-05 19:44:37.000000000 -0400
53673 @@ -183,6 +183,8 @@ struct vm_area_struct {
53674 #ifdef CONFIG_NUMA
53675 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53676 #endif
53677 +
53678 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53679 };
53680
53681 struct core_thread {
53682 @@ -317,6 +319,24 @@ struct mm_struct {
53683 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
53684 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
53685 #endif
53686 +
53687 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53688 + unsigned long pax_flags;
53689 +#endif
53690 +
53691 +#ifdef CONFIG_PAX_DLRESOLVE
53692 + unsigned long call_dl_resolve;
53693 +#endif
53694 +
53695 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53696 + unsigned long call_syscall;
53697 +#endif
53698 +
53699 +#ifdef CONFIG_PAX_ASLR
53700 + unsigned long delta_mmap; /* randomized offset */
53701 + unsigned long delta_stack; /* randomized offset */
53702 +#endif
53703 +
53704 };
53705
53706 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
53707 diff -urNp linux-2.6.39.4/include/linux/mmu_notifier.h linux-2.6.39.4/include/linux/mmu_notifier.h
53708 --- linux-2.6.39.4/include/linux/mmu_notifier.h 2011-05-19 00:06:34.000000000 -0400
53709 +++ linux-2.6.39.4/include/linux/mmu_notifier.h 2011-08-05 19:44:37.000000000 -0400
53710 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53711 */
53712 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53713 ({ \
53714 - pte_t __pte; \
53715 + pte_t ___pte; \
53716 struct vm_area_struct *___vma = __vma; \
53717 unsigned long ___address = __address; \
53718 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53719 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53720 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53721 - __pte; \
53722 + ___pte; \
53723 })
53724
53725 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53726 diff -urNp linux-2.6.39.4/include/linux/mmzone.h linux-2.6.39.4/include/linux/mmzone.h
53727 --- linux-2.6.39.4/include/linux/mmzone.h 2011-05-19 00:06:34.000000000 -0400
53728 +++ linux-2.6.39.4/include/linux/mmzone.h 2011-08-05 19:44:37.000000000 -0400
53729 @@ -355,7 +355,7 @@ struct zone {
53730 unsigned long flags; /* zone flags, see below */
53731
53732 /* Zone statistics */
53733 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53734 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53735
53736 /*
53737 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53738 diff -urNp linux-2.6.39.4/include/linux/mod_devicetable.h linux-2.6.39.4/include/linux/mod_devicetable.h
53739 --- linux-2.6.39.4/include/linux/mod_devicetable.h 2011-05-19 00:06:34.000000000 -0400
53740 +++ linux-2.6.39.4/include/linux/mod_devicetable.h 2011-08-05 19:44:37.000000000 -0400
53741 @@ -12,7 +12,7 @@
53742 typedef unsigned long kernel_ulong_t;
53743 #endif
53744
53745 -#define PCI_ANY_ID (~0)
53746 +#define PCI_ANY_ID ((__u16)~0)
53747
53748 struct pci_device_id {
53749 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53750 @@ -131,7 +131,7 @@ struct usb_device_id {
53751 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53752 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53753
53754 -#define HID_ANY_ID (~0)
53755 +#define HID_ANY_ID (~0U)
53756
53757 struct hid_device_id {
53758 __u16 bus;
53759 diff -urNp linux-2.6.39.4/include/linux/module.h linux-2.6.39.4/include/linux/module.h
53760 --- linux-2.6.39.4/include/linux/module.h 2011-05-19 00:06:34.000000000 -0400
53761 +++ linux-2.6.39.4/include/linux/module.h 2011-08-05 20:34:06.000000000 -0400
53762 @@ -16,6 +16,7 @@
53763 #include <linux/kobject.h>
53764 #include <linux/moduleparam.h>
53765 #include <linux/tracepoint.h>
53766 +#include <linux/fs.h>
53767
53768 #include <linux/percpu.h>
53769 #include <asm/module.h>
53770 @@ -324,19 +325,16 @@ struct module
53771 int (*init)(void);
53772
53773 /* If this is non-NULL, vfree after init() returns */
53774 - void *module_init;
53775 + void *module_init_rx, *module_init_rw;
53776
53777 /* Here is the actual code + data, vfree'd on unload. */
53778 - void *module_core;
53779 + void *module_core_rx, *module_core_rw;
53780
53781 /* Here are the sizes of the init and core sections */
53782 - unsigned int init_size, core_size;
53783 + unsigned int init_size_rw, core_size_rw;
53784
53785 /* The size of the executable code in each section. */
53786 - unsigned int init_text_size, core_text_size;
53787 -
53788 - /* Size of RO sections of the module (text+rodata) */
53789 - unsigned int init_ro_size, core_ro_size;
53790 + unsigned int init_size_rx, core_size_rx;
53791
53792 /* Arch-specific module values */
53793 struct mod_arch_specific arch;
53794 @@ -391,6 +389,10 @@ struct module
53795 #ifdef CONFIG_EVENT_TRACING
53796 struct ftrace_event_call **trace_events;
53797 unsigned int num_trace_events;
53798 + struct file_operations trace_id;
53799 + struct file_operations trace_enable;
53800 + struct file_operations trace_format;
53801 + struct file_operations trace_filter;
53802 #endif
53803 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53804 unsigned long *ftrace_callsites;
53805 @@ -441,16 +443,46 @@ bool is_module_address(unsigned long add
53806 bool is_module_percpu_address(unsigned long addr);
53807 bool is_module_text_address(unsigned long addr);
53808
53809 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53810 +{
53811 +
53812 +#ifdef CONFIG_PAX_KERNEXEC
53813 + if (ktla_ktva(addr) >= (unsigned long)start &&
53814 + ktla_ktva(addr) < (unsigned long)start + size)
53815 + return 1;
53816 +#endif
53817 +
53818 + return ((void *)addr >= start && (void *)addr < start + size);
53819 +}
53820 +
53821 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53822 +{
53823 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53824 +}
53825 +
53826 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53827 +{
53828 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53829 +}
53830 +
53831 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53832 +{
53833 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53834 +}
53835 +
53836 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53837 +{
53838 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53839 +}
53840 +
53841 static inline int within_module_core(unsigned long addr, struct module *mod)
53842 {
53843 - return (unsigned long)mod->module_core <= addr &&
53844 - addr < (unsigned long)mod->module_core + mod->core_size;
53845 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53846 }
53847
53848 static inline int within_module_init(unsigned long addr, struct module *mod)
53849 {
53850 - return (unsigned long)mod->module_init <= addr &&
53851 - addr < (unsigned long)mod->module_init + mod->init_size;
53852 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53853 }
53854
53855 /* Search for module by name: must hold module_mutex. */
53856 diff -urNp linux-2.6.39.4/include/linux/moduleloader.h linux-2.6.39.4/include/linux/moduleloader.h
53857 --- linux-2.6.39.4/include/linux/moduleloader.h 2011-05-19 00:06:34.000000000 -0400
53858 +++ linux-2.6.39.4/include/linux/moduleloader.h 2011-08-05 19:44:37.000000000 -0400
53859 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53860 sections. Returns NULL on failure. */
53861 void *module_alloc(unsigned long size);
53862
53863 +#ifdef CONFIG_PAX_KERNEXEC
53864 +void *module_alloc_exec(unsigned long size);
53865 +#else
53866 +#define module_alloc_exec(x) module_alloc(x)
53867 +#endif
53868 +
53869 /* Free memory returned from module_alloc. */
53870 void module_free(struct module *mod, void *module_region);
53871
53872 +#ifdef CONFIG_PAX_KERNEXEC
53873 +void module_free_exec(struct module *mod, void *module_region);
53874 +#else
53875 +#define module_free_exec(x, y) module_free((x), (y))
53876 +#endif
53877 +
53878 /* Apply the given relocation to the (simplified) ELF. Return -error
53879 or 0. */
53880 int apply_relocate(Elf_Shdr *sechdrs,
53881 diff -urNp linux-2.6.39.4/include/linux/moduleparam.h linux-2.6.39.4/include/linux/moduleparam.h
53882 --- linux-2.6.39.4/include/linux/moduleparam.h 2011-05-19 00:06:34.000000000 -0400
53883 +++ linux-2.6.39.4/include/linux/moduleparam.h 2011-08-05 20:34:06.000000000 -0400
53884 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53885 * @len is usually just sizeof(string).
53886 */
53887 #define module_param_string(name, string, len, perm) \
53888 - static const struct kparam_string __param_string_##name \
53889 + static const struct kparam_string __param_string_##name __used \
53890 = { len, string }; \
53891 __module_param_call(MODULE_PARAM_PREFIX, name, \
53892 &param_ops_string, \
53893 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53894 * module_param_named() for why this might be necessary.
53895 */
53896 #define module_param_array_named(name, array, type, nump, perm) \
53897 - static const struct kparam_array __param_arr_##name \
53898 + static const struct kparam_array __param_arr_##name __used \
53899 = { ARRAY_SIZE(array), nump, &param_ops_##type, \
53900 sizeof(array[0]), array }; \
53901 __module_param_call(MODULE_PARAM_PREFIX, name, \
53902 diff -urNp linux-2.6.39.4/include/linux/mutex.h linux-2.6.39.4/include/linux/mutex.h
53903 --- linux-2.6.39.4/include/linux/mutex.h 2011-05-19 00:06:34.000000000 -0400
53904 +++ linux-2.6.39.4/include/linux/mutex.h 2011-08-05 19:44:37.000000000 -0400
53905 @@ -51,7 +51,7 @@ struct mutex {
53906 spinlock_t wait_lock;
53907 struct list_head wait_list;
53908 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
53909 - struct thread_info *owner;
53910 + struct task_struct *owner;
53911 #endif
53912 #ifdef CONFIG_DEBUG_MUTEXES
53913 const char *name;
53914 diff -urNp linux-2.6.39.4/include/linux/namei.h linux-2.6.39.4/include/linux/namei.h
53915 --- linux-2.6.39.4/include/linux/namei.h 2011-05-19 00:06:34.000000000 -0400
53916 +++ linux-2.6.39.4/include/linux/namei.h 2011-08-05 19:44:37.000000000 -0400
53917 @@ -24,7 +24,7 @@ struct nameidata {
53918 unsigned seq;
53919 int last_type;
53920 unsigned depth;
53921 - char *saved_names[MAX_NESTED_LINKS + 1];
53922 + const char *saved_names[MAX_NESTED_LINKS + 1];
53923
53924 /* Intent data */
53925 union {
53926 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53927 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53928 extern void unlock_rename(struct dentry *, struct dentry *);
53929
53930 -static inline void nd_set_link(struct nameidata *nd, char *path)
53931 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53932 {
53933 nd->saved_names[nd->depth] = path;
53934 }
53935
53936 -static inline char *nd_get_link(struct nameidata *nd)
53937 +static inline const char *nd_get_link(const struct nameidata *nd)
53938 {
53939 return nd->saved_names[nd->depth];
53940 }
53941 diff -urNp linux-2.6.39.4/include/linux/netdevice.h linux-2.6.39.4/include/linux/netdevice.h
53942 --- linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:11:51.000000000 -0400
53943 +++ linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:12:20.000000000 -0400
53944 @@ -979,6 +979,7 @@ struct net_device_ops {
53945 int (*ndo_set_features)(struct net_device *dev,
53946 u32 features);
53947 };
53948 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53949
53950 /*
53951 * The DEVICE structure.
53952 diff -urNp linux-2.6.39.4/include/linux/netfilter/xt_gradm.h linux-2.6.39.4/include/linux/netfilter/xt_gradm.h
53953 --- linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53954 +++ linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 2011-08-05 19:44:37.000000000 -0400
53955 @@ -0,0 +1,9 @@
53956 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53957 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53958 +
53959 +struct xt_gradm_mtinfo {
53960 + __u16 flags;
53961 + __u16 invflags;
53962 +};
53963 +
53964 +#endif
53965 diff -urNp linux-2.6.39.4/include/linux/oprofile.h linux-2.6.39.4/include/linux/oprofile.h
53966 --- linux-2.6.39.4/include/linux/oprofile.h 2011-05-19 00:06:34.000000000 -0400
53967 +++ linux-2.6.39.4/include/linux/oprofile.h 2011-08-05 19:44:37.000000000 -0400
53968 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53969 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53970 char const * name, ulong * val);
53971
53972 -/** Create a file for read-only access to an atomic_t. */
53973 +/** Create a file for read-only access to an atomic_unchecked_t. */
53974 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53975 - char const * name, atomic_t * val);
53976 + char const * name, atomic_unchecked_t * val);
53977
53978 /** create a directory */
53979 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53980 diff -urNp linux-2.6.39.4/include/linux/padata.h linux-2.6.39.4/include/linux/padata.h
53981 --- linux-2.6.39.4/include/linux/padata.h 2011-05-19 00:06:34.000000000 -0400
53982 +++ linux-2.6.39.4/include/linux/padata.h 2011-08-05 19:44:37.000000000 -0400
53983 @@ -129,7 +129,7 @@ struct parallel_data {
53984 struct padata_instance *pinst;
53985 struct padata_parallel_queue __percpu *pqueue;
53986 struct padata_serial_queue __percpu *squeue;
53987 - atomic_t seq_nr;
53988 + atomic_unchecked_t seq_nr;
53989 atomic_t reorder_objects;
53990 atomic_t refcnt;
53991 unsigned int max_seq_nr;
53992 diff -urNp linux-2.6.39.4/include/linux/perf_event.h linux-2.6.39.4/include/linux/perf_event.h
53993 --- linux-2.6.39.4/include/linux/perf_event.h 2011-05-19 00:06:34.000000000 -0400
53994 +++ linux-2.6.39.4/include/linux/perf_event.h 2011-08-05 20:34:06.000000000 -0400
53995 @@ -759,8 +759,8 @@ struct perf_event {
53996
53997 enum perf_event_active_state state;
53998 unsigned int attach_state;
53999 - local64_t count;
54000 - atomic64_t child_count;
54001 + local64_t count; /* PaX: fix it one day */
54002 + atomic64_unchecked_t child_count;
54003
54004 /*
54005 * These are the total time in nanoseconds that the event
54006 @@ -811,8 +811,8 @@ struct perf_event {
54007 * These accumulate total time (in nanoseconds) that children
54008 * events have been enabled and running, respectively.
54009 */
54010 - atomic64_t child_total_time_enabled;
54011 - atomic64_t child_total_time_running;
54012 + atomic64_unchecked_t child_total_time_enabled;
54013 + atomic64_unchecked_t child_total_time_running;
54014
54015 /*
54016 * Protect attach/detach and child_list:
54017 diff -urNp linux-2.6.39.4/include/linux/pipe_fs_i.h linux-2.6.39.4/include/linux/pipe_fs_i.h
54018 --- linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-05-19 00:06:34.000000000 -0400
54019 +++ linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-08-05 19:44:37.000000000 -0400
54020 @@ -46,9 +46,9 @@ struct pipe_buffer {
54021 struct pipe_inode_info {
54022 wait_queue_head_t wait;
54023 unsigned int nrbufs, curbuf, buffers;
54024 - unsigned int readers;
54025 - unsigned int writers;
54026 - unsigned int waiting_writers;
54027 + atomic_t readers;
54028 + atomic_t writers;
54029 + atomic_t waiting_writers;
54030 unsigned int r_counter;
54031 unsigned int w_counter;
54032 struct page *tmp_page;
54033 diff -urNp linux-2.6.39.4/include/linux/pm_runtime.h linux-2.6.39.4/include/linux/pm_runtime.h
54034 --- linux-2.6.39.4/include/linux/pm_runtime.h 2011-05-19 00:06:34.000000000 -0400
54035 +++ linux-2.6.39.4/include/linux/pm_runtime.h 2011-08-05 19:44:37.000000000 -0400
54036 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
54037
54038 static inline void pm_runtime_mark_last_busy(struct device *dev)
54039 {
54040 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
54041 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
54042 }
54043
54044 #else /* !CONFIG_PM_RUNTIME */
54045 diff -urNp linux-2.6.39.4/include/linux/poison.h linux-2.6.39.4/include/linux/poison.h
54046 --- linux-2.6.39.4/include/linux/poison.h 2011-05-19 00:06:34.000000000 -0400
54047 +++ linux-2.6.39.4/include/linux/poison.h 2011-08-05 19:44:37.000000000 -0400
54048 @@ -19,8 +19,8 @@
54049 * under normal circumstances, used to verify that nobody uses
54050 * non-initialized list entries.
54051 */
54052 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
54053 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
54054 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
54055 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
54056
54057 /********** include/linux/timer.h **********/
54058 /*
54059 diff -urNp linux-2.6.39.4/include/linux/preempt.h linux-2.6.39.4/include/linux/preempt.h
54060 --- linux-2.6.39.4/include/linux/preempt.h 2011-05-19 00:06:34.000000000 -0400
54061 +++ linux-2.6.39.4/include/linux/preempt.h 2011-08-05 20:34:06.000000000 -0400
54062 @@ -115,7 +115,7 @@ struct preempt_ops {
54063 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
54064 void (*sched_out)(struct preempt_notifier *notifier,
54065 struct task_struct *next);
54066 -};
54067 +} __no_const;
54068
54069 /**
54070 * preempt_notifier - key for installing preemption notifiers
54071 diff -urNp linux-2.6.39.4/include/linux/proc_fs.h linux-2.6.39.4/include/linux/proc_fs.h
54072 --- linux-2.6.39.4/include/linux/proc_fs.h 2011-05-19 00:06:34.000000000 -0400
54073 +++ linux-2.6.39.4/include/linux/proc_fs.h 2011-08-05 20:34:06.000000000 -0400
54074 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
54075 return proc_create_data(name, mode, parent, proc_fops, NULL);
54076 }
54077
54078 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
54079 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
54080 +{
54081 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54082 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
54083 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54084 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
54085 +#else
54086 + return proc_create_data(name, mode, parent, proc_fops, NULL);
54087 +#endif
54088 +}
54089 +
54090 +
54091 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
54092 mode_t mode, struct proc_dir_entry *base,
54093 read_proc_t *read_proc, void * data)
54094 @@ -258,7 +271,7 @@ union proc_op {
54095 int (*proc_show)(struct seq_file *m,
54096 struct pid_namespace *ns, struct pid *pid,
54097 struct task_struct *task);
54098 -};
54099 +} __no_const;
54100
54101 struct ctl_table_header;
54102 struct ctl_table;
54103 diff -urNp linux-2.6.39.4/include/linux/ptrace.h linux-2.6.39.4/include/linux/ptrace.h
54104 --- linux-2.6.39.4/include/linux/ptrace.h 2011-05-19 00:06:34.000000000 -0400
54105 +++ linux-2.6.39.4/include/linux/ptrace.h 2011-08-05 19:44:37.000000000 -0400
54106 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
54107 extern void exit_ptrace(struct task_struct *tracer);
54108 #define PTRACE_MODE_READ 1
54109 #define PTRACE_MODE_ATTACH 2
54110 -/* Returns 0 on success, -errno on denial. */
54111 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
54112 /* Returns true on success, false on denial. */
54113 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
54114 +/* Returns true on success, false on denial. */
54115 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
54116
54117 static inline int ptrace_reparented(struct task_struct *child)
54118 {
54119 diff -urNp linux-2.6.39.4/include/linux/random.h linux-2.6.39.4/include/linux/random.h
54120 --- linux-2.6.39.4/include/linux/random.h 2011-05-19 00:06:34.000000000 -0400
54121 +++ linux-2.6.39.4/include/linux/random.h 2011-08-05 19:44:37.000000000 -0400
54122 @@ -80,12 +80,17 @@ void srandom32(u32 seed);
54123
54124 u32 prandom32(struct rnd_state *);
54125
54126 +static inline unsigned long pax_get_random_long(void)
54127 +{
54128 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
54129 +}
54130 +
54131 /*
54132 * Handle minimum values for seeds
54133 */
54134 static inline u32 __seed(u32 x, u32 m)
54135 {
54136 - return (x < m) ? x + m : x;
54137 + return (x <= m) ? x + m + 1 : x;
54138 }
54139
54140 /**
54141 diff -urNp linux-2.6.39.4/include/linux/reboot.h linux-2.6.39.4/include/linux/reboot.h
54142 --- linux-2.6.39.4/include/linux/reboot.h 2011-05-19 00:06:34.000000000 -0400
54143 +++ linux-2.6.39.4/include/linux/reboot.h 2011-08-05 19:44:37.000000000 -0400
54144 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
54145 * Architecture-specific implementations of sys_reboot commands.
54146 */
54147
54148 -extern void machine_restart(char *cmd);
54149 -extern void machine_halt(void);
54150 -extern void machine_power_off(void);
54151 +extern void machine_restart(char *cmd) __noreturn;
54152 +extern void machine_halt(void) __noreturn;
54153 +extern void machine_power_off(void) __noreturn;
54154
54155 extern void machine_shutdown(void);
54156 struct pt_regs;
54157 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
54158 */
54159
54160 extern void kernel_restart_prepare(char *cmd);
54161 -extern void kernel_restart(char *cmd);
54162 -extern void kernel_halt(void);
54163 -extern void kernel_power_off(void);
54164 +extern void kernel_restart(char *cmd) __noreturn;
54165 +extern void kernel_halt(void) __noreturn;
54166 +extern void kernel_power_off(void) __noreturn;
54167
54168 extern int C_A_D; /* for sysctl */
54169 void ctrl_alt_del(void);
54170 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
54171 * Emergency restart, callable from an interrupt handler.
54172 */
54173
54174 -extern void emergency_restart(void);
54175 +extern void emergency_restart(void) __noreturn;
54176 #include <asm/emergency-restart.h>
54177
54178 #endif
54179 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs.h linux-2.6.39.4/include/linux/reiserfs_fs.h
54180 --- linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-05-19 00:06:34.000000000 -0400
54181 +++ linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-08-05 20:34:06.000000000 -0400
54182 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
54183 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
54184
54185 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
54186 -#define get_generation(s) atomic_read (&fs_generation(s))
54187 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
54188 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
54189 #define __fs_changed(gen,s) (gen != get_generation (s))
54190 #define fs_changed(gen,s) \
54191 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs_sb.h linux-2.6.39.4/include/linux/reiserfs_fs_sb.h
54192 --- linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-05-19 00:06:34.000000000 -0400
54193 +++ linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-08-05 19:44:37.000000000 -0400
54194 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
54195 /* Comment? -Hans */
54196 wait_queue_head_t s_wait;
54197 /* To be obsoleted soon by per buffer seals.. -Hans */
54198 - atomic_t s_generation_counter; // increased by one every time the
54199 + atomic_unchecked_t s_generation_counter; // increased by one every time the
54200 // tree gets re-balanced
54201 unsigned long s_properties; /* File system properties. Currently holds
54202 on-disk FS format */
54203 diff -urNp linux-2.6.39.4/include/linux/relay.h linux-2.6.39.4/include/linux/relay.h
54204 --- linux-2.6.39.4/include/linux/relay.h 2011-05-19 00:06:34.000000000 -0400
54205 +++ linux-2.6.39.4/include/linux/relay.h 2011-08-05 20:34:06.000000000 -0400
54206 @@ -159,7 +159,7 @@ struct rchan_callbacks
54207 * The callback should return 0 if successful, negative if not.
54208 */
54209 int (*remove_buf_file)(struct dentry *dentry);
54210 -};
54211 +} __no_const;
54212
54213 /*
54214 * CONFIG_RELAY kernel API, kernel/relay.c
54215 diff -urNp linux-2.6.39.4/include/linux/rfkill.h linux-2.6.39.4/include/linux/rfkill.h
54216 --- linux-2.6.39.4/include/linux/rfkill.h 2011-05-19 00:06:34.000000000 -0400
54217 +++ linux-2.6.39.4/include/linux/rfkill.h 2011-08-05 20:34:06.000000000 -0400
54218 @@ -147,6 +147,7 @@ struct rfkill_ops {
54219 void (*query)(struct rfkill *rfkill, void *data);
54220 int (*set_block)(void *data, bool blocked);
54221 };
54222 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54223
54224 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54225 /**
54226 diff -urNp linux-2.6.39.4/include/linux/rmap.h linux-2.6.39.4/include/linux/rmap.h
54227 --- linux-2.6.39.4/include/linux/rmap.h 2011-05-19 00:06:34.000000000 -0400
54228 +++ linux-2.6.39.4/include/linux/rmap.h 2011-08-05 19:44:37.000000000 -0400
54229 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54230 void anon_vma_init(void); /* create anon_vma_cachep */
54231 int anon_vma_prepare(struct vm_area_struct *);
54232 void unlink_anon_vmas(struct vm_area_struct *);
54233 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54234 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54235 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54236 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54237 void __anon_vma_link(struct vm_area_struct *);
54238
54239 static inline void anon_vma_merge(struct vm_area_struct *vma,
54240 diff -urNp linux-2.6.39.4/include/linux/sched.h linux-2.6.39.4/include/linux/sched.h
54241 --- linux-2.6.39.4/include/linux/sched.h 2011-05-19 00:06:34.000000000 -0400
54242 +++ linux-2.6.39.4/include/linux/sched.h 2011-08-05 20:34:06.000000000 -0400
54243 @@ -100,6 +100,7 @@ struct bio_list;
54244 struct fs_struct;
54245 struct perf_event_context;
54246 struct blk_plug;
54247 +struct linux_binprm;
54248
54249 /*
54250 * List of flags we want to share for kernel threads,
54251 @@ -360,7 +361,7 @@ extern signed long schedule_timeout_inte
54252 extern signed long schedule_timeout_killable(signed long timeout);
54253 extern signed long schedule_timeout_uninterruptible(signed long timeout);
54254 asmlinkage void schedule(void);
54255 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
54256 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
54257
54258 struct nsproxy;
54259 struct user_namespace;
54260 @@ -381,10 +382,13 @@ struct user_namespace;
54261 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54262
54263 extern int sysctl_max_map_count;
54264 +extern unsigned long sysctl_heap_stack_gap;
54265
54266 #include <linux/aio.h>
54267
54268 #ifdef CONFIG_MMU
54269 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54270 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54271 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54272 extern unsigned long
54273 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54274 @@ -629,6 +633,17 @@ struct signal_struct {
54275 #ifdef CONFIG_TASKSTATS
54276 struct taskstats *stats;
54277 #endif
54278 +
54279 +#ifdef CONFIG_GRKERNSEC
54280 + u32 curr_ip;
54281 + u32 saved_ip;
54282 + u32 gr_saddr;
54283 + u32 gr_daddr;
54284 + u16 gr_sport;
54285 + u16 gr_dport;
54286 + u8 used_accept:1;
54287 +#endif
54288 +
54289 #ifdef CONFIG_AUDIT
54290 unsigned audit_tty;
54291 struct tty_audit_buf *tty_audit_buf;
54292 @@ -701,6 +716,11 @@ struct user_struct {
54293 struct key *session_keyring; /* UID's default session keyring */
54294 #endif
54295
54296 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54297 + unsigned int banned;
54298 + unsigned long ban_expires;
54299 +#endif
54300 +
54301 /* Hash table maintenance information */
54302 struct hlist_node uidhash_node;
54303 uid_t uid;
54304 @@ -1310,8 +1330,8 @@ struct task_struct {
54305 struct list_head thread_group;
54306
54307 struct completion *vfork_done; /* for vfork() */
54308 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54309 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54310 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54311 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54312
54313 cputime_t utime, stime, utimescaled, stimescaled;
54314 cputime_t gtime;
54315 @@ -1327,13 +1347,6 @@ struct task_struct {
54316 struct task_cputime cputime_expires;
54317 struct list_head cpu_timers[3];
54318
54319 -/* process credentials */
54320 - const struct cred __rcu *real_cred; /* objective and real subjective task
54321 - * credentials (COW) */
54322 - const struct cred __rcu *cred; /* effective (overridable) subjective task
54323 - * credentials (COW) */
54324 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54325 -
54326 char comm[TASK_COMM_LEN]; /* executable name excluding path
54327 - access with [gs]et_task_comm (which lock
54328 it with task_lock())
54329 @@ -1350,8 +1363,16 @@ struct task_struct {
54330 #endif
54331 /* CPU-specific state of this task */
54332 struct thread_struct thread;
54333 +/* thread_info moved to task_struct */
54334 +#ifdef CONFIG_X86
54335 + struct thread_info tinfo;
54336 +#endif
54337 /* filesystem information */
54338 struct fs_struct *fs;
54339 +
54340 + const struct cred __rcu *cred; /* effective (overridable) subjective task
54341 + * credentials (COW) */
54342 +
54343 /* open file information */
54344 struct files_struct *files;
54345 /* namespaces */
54346 @@ -1398,6 +1419,11 @@ struct task_struct {
54347 struct rt_mutex_waiter *pi_blocked_on;
54348 #endif
54349
54350 +/* process credentials */
54351 + const struct cred __rcu *real_cred; /* objective and real subjective task
54352 + * credentials (COW) */
54353 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54354 +
54355 #ifdef CONFIG_DEBUG_MUTEXES
54356 /* mutex deadlock detection */
54357 struct mutex_waiter *blocked_on;
54358 @@ -1508,6 +1534,21 @@ struct task_struct {
54359 unsigned long default_timer_slack_ns;
54360
54361 struct list_head *scm_work_list;
54362 +
54363 +#ifdef CONFIG_GRKERNSEC
54364 + /* grsecurity */
54365 + struct dentry *gr_chroot_dentry;
54366 + struct acl_subject_label *acl;
54367 + struct acl_role_label *role;
54368 + struct file *exec_file;
54369 + u16 acl_role_id;
54370 + /* is this the task that authenticated to the special role */
54371 + u8 acl_sp_role;
54372 + u8 is_writable;
54373 + u8 brute;
54374 + u8 gr_is_chrooted;
54375 +#endif
54376 +
54377 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54378 /* Index of current stored address in ret_stack */
54379 int curr_ret_stack;
54380 @@ -1542,6 +1583,57 @@ struct task_struct {
54381 #endif
54382 };
54383
54384 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54385 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54386 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54387 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54388 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54389 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54390 +
54391 +#ifdef CONFIG_PAX_SOFTMODE
54392 +extern int pax_softmode;
54393 +#endif
54394 +
54395 +extern int pax_check_flags(unsigned long *);
54396 +
54397 +/* if tsk != current then task_lock must be held on it */
54398 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54399 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
54400 +{
54401 + if (likely(tsk->mm))
54402 + return tsk->mm->pax_flags;
54403 + else
54404 + return 0UL;
54405 +}
54406 +
54407 +/* if tsk != current then task_lock must be held on it */
54408 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54409 +{
54410 + if (likely(tsk->mm)) {
54411 + tsk->mm->pax_flags = flags;
54412 + return 0;
54413 + }
54414 + return -EINVAL;
54415 +}
54416 +#endif
54417 +
54418 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54419 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
54420 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54421 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54422 +#endif
54423 +
54424 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54425 +extern void pax_report_insns(void *pc, void *sp);
54426 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
54427 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54428 +
54429 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54430 +extern void pax_track_stack(void);
54431 +#else
54432 +static inline void pax_track_stack(void) {}
54433 +#endif
54434 +
54435 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54436 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54437
54438 @@ -2009,7 +2101,9 @@ void yield(void);
54439 extern struct exec_domain default_exec_domain;
54440
54441 union thread_union {
54442 +#ifndef CONFIG_X86
54443 struct thread_info thread_info;
54444 +#endif
54445 unsigned long stack[THREAD_SIZE/sizeof(long)];
54446 };
54447
54448 @@ -2042,6 +2136,7 @@ extern struct pid_namespace init_pid_ns;
54449 */
54450
54451 extern struct task_struct *find_task_by_vpid(pid_t nr);
54452 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54453 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54454 struct pid_namespace *ns);
54455
54456 @@ -2179,7 +2274,7 @@ extern void __cleanup_sighand(struct sig
54457 extern void exit_itimers(struct signal_struct *);
54458 extern void flush_itimer_signals(void);
54459
54460 -extern NORET_TYPE void do_group_exit(int);
54461 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54462
54463 extern void daemonize(const char *, ...);
54464 extern int allow_signal(int);
54465 @@ -2320,13 +2415,17 @@ static inline unsigned long *end_of_stac
54466
54467 #endif
54468
54469 -static inline int object_is_on_stack(void *obj)
54470 +static inline int object_starts_on_stack(void *obj)
54471 {
54472 - void *stack = task_stack_page(current);
54473 + const void *stack = task_stack_page(current);
54474
54475 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54476 }
54477
54478 +#ifdef CONFIG_PAX_USERCOPY
54479 +extern int object_is_on_stack(const void *obj, unsigned long len);
54480 +#endif
54481 +
54482 extern void thread_info_cache_init(void);
54483
54484 #ifdef CONFIG_DEBUG_STACK_USAGE
54485 diff -urNp linux-2.6.39.4/include/linux/screen_info.h linux-2.6.39.4/include/linux/screen_info.h
54486 --- linux-2.6.39.4/include/linux/screen_info.h 2011-05-19 00:06:34.000000000 -0400
54487 +++ linux-2.6.39.4/include/linux/screen_info.h 2011-08-05 19:44:37.000000000 -0400
54488 @@ -43,7 +43,8 @@ struct screen_info {
54489 __u16 pages; /* 0x32 */
54490 __u16 vesa_attributes; /* 0x34 */
54491 __u32 capabilities; /* 0x36 */
54492 - __u8 _reserved[6]; /* 0x3a */
54493 + __u16 vesapm_size; /* 0x3a */
54494 + __u8 _reserved[4]; /* 0x3c */
54495 } __attribute__((packed));
54496
54497 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54498 diff -urNp linux-2.6.39.4/include/linux/security.h linux-2.6.39.4/include/linux/security.h
54499 --- linux-2.6.39.4/include/linux/security.h 2011-05-19 00:06:34.000000000 -0400
54500 +++ linux-2.6.39.4/include/linux/security.h 2011-08-05 19:44:37.000000000 -0400
54501 @@ -36,6 +36,7 @@
54502 #include <linux/key.h>
54503 #include <linux/xfrm.h>
54504 #include <linux/slab.h>
54505 +#include <linux/grsecurity.h>
54506 #include <net/flow.h>
54507
54508 /* Maximum number of letters for an LSM name string */
54509 diff -urNp linux-2.6.39.4/include/linux/seq_file.h linux-2.6.39.4/include/linux/seq_file.h
54510 --- linux-2.6.39.4/include/linux/seq_file.h 2011-05-19 00:06:34.000000000 -0400
54511 +++ linux-2.6.39.4/include/linux/seq_file.h 2011-08-05 20:34:06.000000000 -0400
54512 @@ -32,6 +32,7 @@ struct seq_operations {
54513 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54514 int (*show) (struct seq_file *m, void *v);
54515 };
54516 +typedef struct seq_operations __no_const seq_operations_no_const;
54517
54518 #define SEQ_SKIP 1
54519
54520 diff -urNp linux-2.6.39.4/include/linux/shm.h linux-2.6.39.4/include/linux/shm.h
54521 --- linux-2.6.39.4/include/linux/shm.h 2011-05-19 00:06:34.000000000 -0400
54522 +++ linux-2.6.39.4/include/linux/shm.h 2011-08-05 19:44:37.000000000 -0400
54523 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54524 pid_t shm_cprid;
54525 pid_t shm_lprid;
54526 struct user_struct *mlock_user;
54527 +#ifdef CONFIG_GRKERNSEC
54528 + time_t shm_createtime;
54529 + pid_t shm_lapid;
54530 +#endif
54531 };
54532
54533 /* shm_mode upper byte flags */
54534 diff -urNp linux-2.6.39.4/include/linux/skbuff.h linux-2.6.39.4/include/linux/skbuff.h
54535 --- linux-2.6.39.4/include/linux/skbuff.h 2011-05-19 00:06:34.000000000 -0400
54536 +++ linux-2.6.39.4/include/linux/skbuff.h 2011-08-05 19:44:37.000000000 -0400
54537 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54538 */
54539 static inline int skb_queue_empty(const struct sk_buff_head *list)
54540 {
54541 - return list->next == (struct sk_buff *)list;
54542 + return list->next == (const struct sk_buff *)list;
54543 }
54544
54545 /**
54546 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54547 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54548 const struct sk_buff *skb)
54549 {
54550 - return skb->next == (struct sk_buff *)list;
54551 + return skb->next == (const struct sk_buff *)list;
54552 }
54553
54554 /**
54555 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54556 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54557 const struct sk_buff *skb)
54558 {
54559 - return skb->prev == (struct sk_buff *)list;
54560 + return skb->prev == (const struct sk_buff *)list;
54561 }
54562
54563 /**
54564 @@ -1435,7 +1435,7 @@ static inline int pskb_network_may_pull(
54565 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54566 */
54567 #ifndef NET_SKB_PAD
54568 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54569 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54570 #endif
54571
54572 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54573 diff -urNp linux-2.6.39.4/include/linux/slab_def.h linux-2.6.39.4/include/linux/slab_def.h
54574 --- linux-2.6.39.4/include/linux/slab_def.h 2011-05-19 00:06:34.000000000 -0400
54575 +++ linux-2.6.39.4/include/linux/slab_def.h 2011-08-05 19:44:37.000000000 -0400
54576 @@ -96,10 +96,10 @@ struct kmem_cache {
54577 unsigned long node_allocs;
54578 unsigned long node_frees;
54579 unsigned long node_overflow;
54580 - atomic_t allochit;
54581 - atomic_t allocmiss;
54582 - atomic_t freehit;
54583 - atomic_t freemiss;
54584 + atomic_unchecked_t allochit;
54585 + atomic_unchecked_t allocmiss;
54586 + atomic_unchecked_t freehit;
54587 + atomic_unchecked_t freemiss;
54588
54589 /*
54590 * If debugging is enabled, then the allocator can add additional
54591 diff -urNp linux-2.6.39.4/include/linux/slab.h linux-2.6.39.4/include/linux/slab.h
54592 --- linux-2.6.39.4/include/linux/slab.h 2011-05-19 00:06:34.000000000 -0400
54593 +++ linux-2.6.39.4/include/linux/slab.h 2011-08-05 19:44:37.000000000 -0400
54594 @@ -11,12 +11,20 @@
54595
54596 #include <linux/gfp.h>
54597 #include <linux/types.h>
54598 +#include <linux/err.h>
54599
54600 /*
54601 * Flags to pass to kmem_cache_create().
54602 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54603 */
54604 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54605 +
54606 +#ifdef CONFIG_PAX_USERCOPY
54607 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54608 +#else
54609 +#define SLAB_USERCOPY 0x00000000UL
54610 +#endif
54611 +
54612 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54613 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54614 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54615 @@ -87,10 +95,13 @@
54616 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54617 * Both make kfree a no-op.
54618 */
54619 -#define ZERO_SIZE_PTR ((void *)16)
54620 +#define ZERO_SIZE_PTR \
54621 +({ \
54622 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54623 + (void *)(-MAX_ERRNO-1L); \
54624 +})
54625
54626 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54627 - (unsigned long)ZERO_SIZE_PTR)
54628 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54629
54630 /*
54631 * struct kmem_cache related prototypes
54632 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54633 void kfree(const void *);
54634 void kzfree(const void *);
54635 size_t ksize(const void *);
54636 +void check_object_size(const void *ptr, unsigned long n, bool to);
54637
54638 /*
54639 * Allocator specific definitions. These are mainly used to establish optimized
54640 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54641
54642 void __init kmem_cache_init_late(void);
54643
54644 +#define kmalloc(x, y) \
54645 +({ \
54646 + void *___retval; \
54647 + intoverflow_t ___x = (intoverflow_t)x; \
54648 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54649 + ___retval = NULL; \
54650 + else \
54651 + ___retval = kmalloc((size_t)___x, (y)); \
54652 + ___retval; \
54653 +})
54654 +
54655 +#define kmalloc_node(x, y, z) \
54656 +({ \
54657 + void *___retval; \
54658 + intoverflow_t ___x = (intoverflow_t)x; \
54659 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54660 + ___retval = NULL; \
54661 + else \
54662 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54663 + ___retval; \
54664 +})
54665 +
54666 +#define kzalloc(x, y) \
54667 +({ \
54668 + void *___retval; \
54669 + intoverflow_t ___x = (intoverflow_t)x; \
54670 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54671 + ___retval = NULL; \
54672 + else \
54673 + ___retval = kzalloc((size_t)___x, (y)); \
54674 + ___retval; \
54675 +})
54676 +
54677 +#define __krealloc(x, y, z) \
54678 +({ \
54679 + void *___retval; \
54680 + intoverflow_t ___y = (intoverflow_t)y; \
54681 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54682 + ___retval = NULL; \
54683 + else \
54684 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54685 + ___retval; \
54686 +})
54687 +
54688 +#define krealloc(x, y, z) \
54689 +({ \
54690 + void *___retval; \
54691 + intoverflow_t ___y = (intoverflow_t)y; \
54692 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54693 + ___retval = NULL; \
54694 + else \
54695 + ___retval = krealloc((x), (size_t)___y, (z)); \
54696 + ___retval; \
54697 +})
54698 +
54699 #endif /* _LINUX_SLAB_H */
54700 diff -urNp linux-2.6.39.4/include/linux/slub_def.h linux-2.6.39.4/include/linux/slub_def.h
54701 --- linux-2.6.39.4/include/linux/slub_def.h 2011-05-19 00:06:34.000000000 -0400
54702 +++ linux-2.6.39.4/include/linux/slub_def.h 2011-08-05 20:34:06.000000000 -0400
54703 @@ -84,7 +84,7 @@ struct kmem_cache {
54704 struct kmem_cache_order_objects max;
54705 struct kmem_cache_order_objects min;
54706 gfp_t allocflags; /* gfp flags to use on each alloc */
54707 - int refcount; /* Refcount for slab cache destroy */
54708 + atomic_t refcount; /* Refcount for slab cache destroy */
54709 void (*ctor)(void *);
54710 int inuse; /* Offset to metadata */
54711 int align; /* Alignment */
54712 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54713 }
54714
54715 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54716 -void *__kmalloc(size_t size, gfp_t flags);
54717 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54718
54719 static __always_inline void *
54720 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54721 diff -urNp linux-2.6.39.4/include/linux/sonet.h linux-2.6.39.4/include/linux/sonet.h
54722 --- linux-2.6.39.4/include/linux/sonet.h 2011-05-19 00:06:34.000000000 -0400
54723 +++ linux-2.6.39.4/include/linux/sonet.h 2011-08-05 19:44:37.000000000 -0400
54724 @@ -61,7 +61,7 @@ struct sonet_stats {
54725 #include <asm/atomic.h>
54726
54727 struct k_sonet_stats {
54728 -#define __HANDLE_ITEM(i) atomic_t i
54729 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54730 __SONET_ITEMS
54731 #undef __HANDLE_ITEM
54732 };
54733 diff -urNp linux-2.6.39.4/include/linux/sunrpc/clnt.h linux-2.6.39.4/include/linux/sunrpc/clnt.h
54734 --- linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-05-19 00:06:34.000000000 -0400
54735 +++ linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-08-05 19:44:37.000000000 -0400
54736 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54737 {
54738 switch (sap->sa_family) {
54739 case AF_INET:
54740 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54741 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54742 case AF_INET6:
54743 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54744 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54745 }
54746 return 0;
54747 }
54748 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54749 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54750 const struct sockaddr *src)
54751 {
54752 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54753 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54754 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54755
54756 dsin->sin_family = ssin->sin_family;
54757 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54758 if (sa->sa_family != AF_INET6)
54759 return 0;
54760
54761 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54762 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54763 }
54764
54765 #endif /* __KERNEL__ */
54766 diff -urNp linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h
54767 --- linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-05-19 00:06:34.000000000 -0400
54768 +++ linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-08-05 19:44:37.000000000 -0400
54769 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54770 extern unsigned int svcrdma_max_requests;
54771 extern unsigned int svcrdma_max_req_size;
54772
54773 -extern atomic_t rdma_stat_recv;
54774 -extern atomic_t rdma_stat_read;
54775 -extern atomic_t rdma_stat_write;
54776 -extern atomic_t rdma_stat_sq_starve;
54777 -extern atomic_t rdma_stat_rq_starve;
54778 -extern atomic_t rdma_stat_rq_poll;
54779 -extern atomic_t rdma_stat_rq_prod;
54780 -extern atomic_t rdma_stat_sq_poll;
54781 -extern atomic_t rdma_stat_sq_prod;
54782 +extern atomic_unchecked_t rdma_stat_recv;
54783 +extern atomic_unchecked_t rdma_stat_read;
54784 +extern atomic_unchecked_t rdma_stat_write;
54785 +extern atomic_unchecked_t rdma_stat_sq_starve;
54786 +extern atomic_unchecked_t rdma_stat_rq_starve;
54787 +extern atomic_unchecked_t rdma_stat_rq_poll;
54788 +extern atomic_unchecked_t rdma_stat_rq_prod;
54789 +extern atomic_unchecked_t rdma_stat_sq_poll;
54790 +extern atomic_unchecked_t rdma_stat_sq_prod;
54791
54792 #define RPCRDMA_VERSION 1
54793
54794 diff -urNp linux-2.6.39.4/include/linux/sysctl.h linux-2.6.39.4/include/linux/sysctl.h
54795 --- linux-2.6.39.4/include/linux/sysctl.h 2011-05-19 00:06:34.000000000 -0400
54796 +++ linux-2.6.39.4/include/linux/sysctl.h 2011-08-05 19:44:37.000000000 -0400
54797 @@ -155,7 +155,11 @@ enum
54798 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54799 };
54800
54801 -
54802 +#ifdef CONFIG_PAX_SOFTMODE
54803 +enum {
54804 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54805 +};
54806 +#endif
54807
54808 /* CTL_VM names: */
54809 enum
54810 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54811
54812 extern int proc_dostring(struct ctl_table *, int,
54813 void __user *, size_t *, loff_t *);
54814 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54815 + void __user *, size_t *, loff_t *);
54816 extern int proc_dointvec(struct ctl_table *, int,
54817 void __user *, size_t *, loff_t *);
54818 extern int proc_dointvec_minmax(struct ctl_table *, int,
54819 diff -urNp linux-2.6.39.4/include/linux/tty_ldisc.h linux-2.6.39.4/include/linux/tty_ldisc.h
54820 --- linux-2.6.39.4/include/linux/tty_ldisc.h 2011-05-19 00:06:34.000000000 -0400
54821 +++ linux-2.6.39.4/include/linux/tty_ldisc.h 2011-08-05 19:44:37.000000000 -0400
54822 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54823
54824 struct module *owner;
54825
54826 - int refcount;
54827 + atomic_t refcount;
54828 };
54829
54830 struct tty_ldisc {
54831 diff -urNp linux-2.6.39.4/include/linux/types.h linux-2.6.39.4/include/linux/types.h
54832 --- linux-2.6.39.4/include/linux/types.h 2011-05-19 00:06:34.000000000 -0400
54833 +++ linux-2.6.39.4/include/linux/types.h 2011-08-05 19:44:37.000000000 -0400
54834 @@ -213,10 +213,26 @@ typedef struct {
54835 int counter;
54836 } atomic_t;
54837
54838 +#ifdef CONFIG_PAX_REFCOUNT
54839 +typedef struct {
54840 + int counter;
54841 +} atomic_unchecked_t;
54842 +#else
54843 +typedef atomic_t atomic_unchecked_t;
54844 +#endif
54845 +
54846 #ifdef CONFIG_64BIT
54847 typedef struct {
54848 long counter;
54849 } atomic64_t;
54850 +
54851 +#ifdef CONFIG_PAX_REFCOUNT
54852 +typedef struct {
54853 + long counter;
54854 +} atomic64_unchecked_t;
54855 +#else
54856 +typedef atomic64_t atomic64_unchecked_t;
54857 +#endif
54858 #endif
54859
54860 struct list_head {
54861 diff -urNp linux-2.6.39.4/include/linux/uaccess.h linux-2.6.39.4/include/linux/uaccess.h
54862 --- linux-2.6.39.4/include/linux/uaccess.h 2011-05-19 00:06:34.000000000 -0400
54863 +++ linux-2.6.39.4/include/linux/uaccess.h 2011-08-05 19:44:37.000000000 -0400
54864 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54865 long ret; \
54866 mm_segment_t old_fs = get_fs(); \
54867 \
54868 - set_fs(KERNEL_DS); \
54869 pagefault_disable(); \
54870 + set_fs(KERNEL_DS); \
54871 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54872 - pagefault_enable(); \
54873 set_fs(old_fs); \
54874 + pagefault_enable(); \
54875 ret; \
54876 })
54877
54878 @@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
54879 * Safely read from address @src to the buffer at @dst. If a kernel fault
54880 * happens, handle that and return -EFAULT.
54881 */
54882 -extern long probe_kernel_read(void *dst, void *src, size_t size);
54883 -extern long __probe_kernel_read(void *dst, void *src, size_t size);
54884 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
54885 +extern long __probe_kernel_read(void *dst, const void *src, size_t size);
54886
54887 /*
54888 * probe_kernel_write(): safely attempt to write to a location
54889 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
54890 * Safely write to address @dst from the buffer at @src. If a kernel fault
54891 * happens, handle that and return -EFAULT.
54892 */
54893 -extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
54894 -extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
54895 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
54896 +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
54897
54898 #endif /* __LINUX_UACCESS_H__ */
54899 diff -urNp linux-2.6.39.4/include/linux/unaligned/access_ok.h linux-2.6.39.4/include/linux/unaligned/access_ok.h
54900 --- linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-05-19 00:06:34.000000000 -0400
54901 +++ linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-08-05 19:44:37.000000000 -0400
54902 @@ -6,32 +6,32 @@
54903
54904 static inline u16 get_unaligned_le16(const void *p)
54905 {
54906 - return le16_to_cpup((__le16 *)p);
54907 + return le16_to_cpup((const __le16 *)p);
54908 }
54909
54910 static inline u32 get_unaligned_le32(const void *p)
54911 {
54912 - return le32_to_cpup((__le32 *)p);
54913 + return le32_to_cpup((const __le32 *)p);
54914 }
54915
54916 static inline u64 get_unaligned_le64(const void *p)
54917 {
54918 - return le64_to_cpup((__le64 *)p);
54919 + return le64_to_cpup((const __le64 *)p);
54920 }
54921
54922 static inline u16 get_unaligned_be16(const void *p)
54923 {
54924 - return be16_to_cpup((__be16 *)p);
54925 + return be16_to_cpup((const __be16 *)p);
54926 }
54927
54928 static inline u32 get_unaligned_be32(const void *p)
54929 {
54930 - return be32_to_cpup((__be32 *)p);
54931 + return be32_to_cpup((const __be32 *)p);
54932 }
54933
54934 static inline u64 get_unaligned_be64(const void *p)
54935 {
54936 - return be64_to_cpup((__be64 *)p);
54937 + return be64_to_cpup((const __be64 *)p);
54938 }
54939
54940 static inline void put_unaligned_le16(u16 val, void *p)
54941 diff -urNp linux-2.6.39.4/include/linux/vmalloc.h linux-2.6.39.4/include/linux/vmalloc.h
54942 --- linux-2.6.39.4/include/linux/vmalloc.h 2011-05-19 00:06:34.000000000 -0400
54943 +++ linux-2.6.39.4/include/linux/vmalloc.h 2011-08-05 19:44:37.000000000 -0400
54944 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54945 #define VM_MAP 0x00000004 /* vmap()ed pages */
54946 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54947 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54948 +
54949 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54950 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54951 +#endif
54952 +
54953 /* bits [20..32] reserved for arch specific ioremap internals */
54954
54955 /*
54956 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54957 # endif
54958 #endif
54959
54960 +#define vmalloc(x) \
54961 +({ \
54962 + void *___retval; \
54963 + intoverflow_t ___x = (intoverflow_t)x; \
54964 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54965 + ___retval = NULL; \
54966 + else \
54967 + ___retval = vmalloc((unsigned long)___x); \
54968 + ___retval; \
54969 +})
54970 +
54971 +#define vzalloc(x) \
54972 +({ \
54973 + void *___retval; \
54974 + intoverflow_t ___x = (intoverflow_t)x; \
54975 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54976 + ___retval = NULL; \
54977 + else \
54978 + ___retval = vzalloc((unsigned long)___x); \
54979 + ___retval; \
54980 +})
54981 +
54982 +#define __vmalloc(x, y, z) \
54983 +({ \
54984 + void *___retval; \
54985 + intoverflow_t ___x = (intoverflow_t)x; \
54986 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54987 + ___retval = NULL; \
54988 + else \
54989 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54990 + ___retval; \
54991 +})
54992 +
54993 +#define vmalloc_user(x) \
54994 +({ \
54995 + void *___retval; \
54996 + intoverflow_t ___x = (intoverflow_t)x; \
54997 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54998 + ___retval = NULL; \
54999 + else \
55000 + ___retval = vmalloc_user((unsigned long)___x); \
55001 + ___retval; \
55002 +})
55003 +
55004 +#define vmalloc_exec(x) \
55005 +({ \
55006 + void *___retval; \
55007 + intoverflow_t ___x = (intoverflow_t)x; \
55008 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
55009 + ___retval = NULL; \
55010 + else \
55011 + ___retval = vmalloc_exec((unsigned long)___x); \
55012 + ___retval; \
55013 +})
55014 +
55015 +#define vmalloc_node(x, y) \
55016 +({ \
55017 + void *___retval; \
55018 + intoverflow_t ___x = (intoverflow_t)x; \
55019 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
55020 + ___retval = NULL; \
55021 + else \
55022 + ___retval = vmalloc_node((unsigned long)___x, (y));\
55023 + ___retval; \
55024 +})
55025 +
55026 +#define vzalloc_node(x, y) \
55027 +({ \
55028 + void *___retval; \
55029 + intoverflow_t ___x = (intoverflow_t)x; \
55030 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
55031 + ___retval = NULL; \
55032 + else \
55033 + ___retval = vzalloc_node((unsigned long)___x, (y));\
55034 + ___retval; \
55035 +})
55036 +
55037 +#define vmalloc_32(x) \
55038 +({ \
55039 + void *___retval; \
55040 + intoverflow_t ___x = (intoverflow_t)x; \
55041 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
55042 + ___retval = NULL; \
55043 + else \
55044 + ___retval = vmalloc_32((unsigned long)___x); \
55045 + ___retval; \
55046 +})
55047 +
55048 +#define vmalloc_32_user(x) \
55049 +({ \
55050 +void *___retval; \
55051 + intoverflow_t ___x = (intoverflow_t)x; \
55052 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
55053 + ___retval = NULL; \
55054 + else \
55055 + ___retval = vmalloc_32_user((unsigned long)___x);\
55056 + ___retval; \
55057 +})
55058 +
55059 #endif /* _LINUX_VMALLOC_H */
55060 diff -urNp linux-2.6.39.4/include/linux/vmstat.h linux-2.6.39.4/include/linux/vmstat.h
55061 --- linux-2.6.39.4/include/linux/vmstat.h 2011-05-19 00:06:34.000000000 -0400
55062 +++ linux-2.6.39.4/include/linux/vmstat.h 2011-08-05 19:44:37.000000000 -0400
55063 @@ -147,18 +147,18 @@ static inline void vm_events_fold_cpu(in
55064 /*
55065 * Zone based page accounting with per cpu differentials.
55066 */
55067 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55068 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55069
55070 static inline void zone_page_state_add(long x, struct zone *zone,
55071 enum zone_stat_item item)
55072 {
55073 - atomic_long_add(x, &zone->vm_stat[item]);
55074 - atomic_long_add(x, &vm_stat[item]);
55075 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
55076 + atomic_long_add_unchecked(x, &vm_stat[item]);
55077 }
55078
55079 static inline unsigned long global_page_state(enum zone_stat_item item)
55080 {
55081 - long x = atomic_long_read(&vm_stat[item]);
55082 + long x = atomic_long_read_unchecked(&vm_stat[item]);
55083 #ifdef CONFIG_SMP
55084 if (x < 0)
55085 x = 0;
55086 @@ -169,7 +169,7 @@ static inline unsigned long global_page_
55087 static inline unsigned long zone_page_state(struct zone *zone,
55088 enum zone_stat_item item)
55089 {
55090 - long x = atomic_long_read(&zone->vm_stat[item]);
55091 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55092 #ifdef CONFIG_SMP
55093 if (x < 0)
55094 x = 0;
55095 @@ -186,7 +186,7 @@ static inline unsigned long zone_page_st
55096 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
55097 enum zone_stat_item item)
55098 {
55099 - long x = atomic_long_read(&zone->vm_stat[item]);
55100 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55101
55102 #ifdef CONFIG_SMP
55103 int cpu;
55104 @@ -280,8 +280,8 @@ static inline void __mod_zone_page_state
55105
55106 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
55107 {
55108 - atomic_long_inc(&zone->vm_stat[item]);
55109 - atomic_long_inc(&vm_stat[item]);
55110 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
55111 + atomic_long_inc_unchecked(&vm_stat[item]);
55112 }
55113
55114 static inline void __inc_zone_page_state(struct page *page,
55115 @@ -292,8 +292,8 @@ static inline void __inc_zone_page_state
55116
55117 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
55118 {
55119 - atomic_long_dec(&zone->vm_stat[item]);
55120 - atomic_long_dec(&vm_stat[item]);
55121 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
55122 + atomic_long_dec_unchecked(&vm_stat[item]);
55123 }
55124
55125 static inline void __dec_zone_page_state(struct page *page,
55126 diff -urNp linux-2.6.39.4/include/media/saa7146_vv.h linux-2.6.39.4/include/media/saa7146_vv.h
55127 --- linux-2.6.39.4/include/media/saa7146_vv.h 2011-05-19 00:06:34.000000000 -0400
55128 +++ linux-2.6.39.4/include/media/saa7146_vv.h 2011-08-05 20:34:06.000000000 -0400
55129 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
55130 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
55131
55132 /* the extension can override this */
55133 - struct v4l2_ioctl_ops ops;
55134 + v4l2_ioctl_ops_no_const ops;
55135 /* pointer to the saa7146 core ops */
55136 const struct v4l2_ioctl_ops *core_ops;
55137
55138 diff -urNp linux-2.6.39.4/include/media/v4l2-dev.h linux-2.6.39.4/include/media/v4l2-dev.h
55139 --- linux-2.6.39.4/include/media/v4l2-dev.h 2011-05-19 00:06:34.000000000 -0400
55140 +++ linux-2.6.39.4/include/media/v4l2-dev.h 2011-08-05 20:34:06.000000000 -0400
55141 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
55142
55143
55144 struct v4l2_file_operations {
55145 - struct module *owner;
55146 + struct module * const owner;
55147 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
55148 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
55149 unsigned int (*poll) (struct file *, struct poll_table_struct *);
55150 diff -urNp linux-2.6.39.4/include/media/v4l2-device.h linux-2.6.39.4/include/media/v4l2-device.h
55151 --- linux-2.6.39.4/include/media/v4l2-device.h 2011-05-19 00:06:34.000000000 -0400
55152 +++ linux-2.6.39.4/include/media/v4l2-device.h 2011-08-05 19:44:37.000000000 -0400
55153 @@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
55154 this function returns 0. If the name ends with a digit (e.g. cx18),
55155 then the name will be set to cx18-0 since cx180 looks really odd. */
55156 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
55157 - atomic_t *instance);
55158 + atomic_unchecked_t *instance);
55159
55160 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
55161 Since the parent disappears this ensures that v4l2_dev doesn't have an
55162 diff -urNp linux-2.6.39.4/include/media/v4l2-ioctl.h linux-2.6.39.4/include/media/v4l2-ioctl.h
55163 --- linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-05-19 00:06:34.000000000 -0400
55164 +++ linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-08-05 20:34:06.000000000 -0400
55165 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
55166 long (*vidioc_default) (struct file *file, void *fh,
55167 bool valid_prio, int cmd, void *arg);
55168 };
55169 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
55170
55171
55172 /* v4l debugging and diagnostics */
55173 diff -urNp linux-2.6.39.4/include/net/caif/cfctrl.h linux-2.6.39.4/include/net/caif/cfctrl.h
55174 --- linux-2.6.39.4/include/net/caif/cfctrl.h 2011-05-19 00:06:34.000000000 -0400
55175 +++ linux-2.6.39.4/include/net/caif/cfctrl.h 2011-08-05 20:34:06.000000000 -0400
55176 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
55177 void (*radioset_rsp)(void);
55178 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
55179 struct cflayer *client_layer);
55180 -};
55181 +} __no_const;
55182
55183 /* Link Setup Parameters for CAIF-Links. */
55184 struct cfctrl_link_param {
55185 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
55186 struct cfctrl {
55187 struct cfsrvl serv;
55188 struct cfctrl_rsp res;
55189 - atomic_t req_seq_no;
55190 - atomic_t rsp_seq_no;
55191 + atomic_unchecked_t req_seq_no;
55192 + atomic_unchecked_t rsp_seq_no;
55193 struct list_head list;
55194 /* Protects from simultaneous access to first_req list */
55195 spinlock_t info_list_lock;
55196 diff -urNp linux-2.6.39.4/include/net/flow.h linux-2.6.39.4/include/net/flow.h
55197 --- linux-2.6.39.4/include/net/flow.h 2011-05-19 00:06:34.000000000 -0400
55198 +++ linux-2.6.39.4/include/net/flow.h 2011-08-05 19:44:37.000000000 -0400
55199 @@ -167,6 +167,6 @@ extern struct flow_cache_object *flow_ca
55200 u8 dir, flow_resolve_t resolver, void *ctx);
55201
55202 extern void flow_cache_flush(void);
55203 -extern atomic_t flow_cache_genid;
55204 +extern atomic_unchecked_t flow_cache_genid;
55205
55206 #endif
55207 diff -urNp linux-2.6.39.4/include/net/inetpeer.h linux-2.6.39.4/include/net/inetpeer.h
55208 --- linux-2.6.39.4/include/net/inetpeer.h 2011-05-19 00:06:34.000000000 -0400
55209 +++ linux-2.6.39.4/include/net/inetpeer.h 2011-08-05 19:44:37.000000000 -0400
55210 @@ -43,8 +43,8 @@ struct inet_peer {
55211 */
55212 union {
55213 struct {
55214 - atomic_t rid; /* Frag reception counter */
55215 - atomic_t ip_id_count; /* IP ID for the next packet */
55216 + atomic_unchecked_t rid; /* Frag reception counter */
55217 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
55218 __u32 tcp_ts;
55219 __u32 tcp_ts_stamp;
55220 u32 metrics[RTAX_MAX];
55221 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
55222 {
55223 more++;
55224 inet_peer_refcheck(p);
55225 - return atomic_add_return(more, &p->ip_id_count) - more;
55226 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
55227 }
55228
55229 #endif /* _NET_INETPEER_H */
55230 diff -urNp linux-2.6.39.4/include/net/ip_fib.h linux-2.6.39.4/include/net/ip_fib.h
55231 --- linux-2.6.39.4/include/net/ip_fib.h 2011-05-19 00:06:34.000000000 -0400
55232 +++ linux-2.6.39.4/include/net/ip_fib.h 2011-08-05 19:44:37.000000000 -0400
55233 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55234
55235 #define FIB_RES_SADDR(net, res) \
55236 ((FIB_RES_NH(res).nh_saddr_genid == \
55237 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55238 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55239 FIB_RES_NH(res).nh_saddr : \
55240 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55241 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55242 diff -urNp linux-2.6.39.4/include/net/ip_vs.h linux-2.6.39.4/include/net/ip_vs.h
55243 --- linux-2.6.39.4/include/net/ip_vs.h 2011-07-09 09:18:51.000000000 -0400
55244 +++ linux-2.6.39.4/include/net/ip_vs.h 2011-08-05 19:44:37.000000000 -0400
55245 @@ -512,7 +512,7 @@ struct ip_vs_conn {
55246 struct ip_vs_conn *control; /* Master control connection */
55247 atomic_t n_control; /* Number of controlled ones */
55248 struct ip_vs_dest *dest; /* real server */
55249 - atomic_t in_pkts; /* incoming packet counter */
55250 + atomic_unchecked_t in_pkts; /* incoming packet counter */
55251
55252 /* packet transmitter for different forwarding methods. If it
55253 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55254 @@ -650,7 +650,7 @@ struct ip_vs_dest {
55255 __be16 port; /* port number of the server */
55256 union nf_inet_addr addr; /* IP address of the server */
55257 volatile unsigned flags; /* dest status flags */
55258 - atomic_t conn_flags; /* flags to copy to conn */
55259 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
55260 atomic_t weight; /* server weight */
55261
55262 atomic_t refcnt; /* reference counter */
55263 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_core.h linux-2.6.39.4/include/net/irda/ircomm_core.h
55264 --- linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-05-19 00:06:34.000000000 -0400
55265 +++ linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-08-05 20:34:06.000000000 -0400
55266 @@ -51,7 +51,7 @@ typedef struct {
55267 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55268 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55269 struct ircomm_info *);
55270 -} call_t;
55271 +} __no_const call_t;
55272
55273 struct ircomm_cb {
55274 irda_queue_t queue;
55275 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_tty.h linux-2.6.39.4/include/net/irda/ircomm_tty.h
55276 --- linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-05-19 00:06:34.000000000 -0400
55277 +++ linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-08-05 19:44:37.000000000 -0400
55278 @@ -35,6 +35,7 @@
55279 #include <linux/termios.h>
55280 #include <linux/timer.h>
55281 #include <linux/tty.h> /* struct tty_struct */
55282 +#include <asm/local.h>
55283
55284 #include <net/irda/irias_object.h>
55285 #include <net/irda/ircomm_core.h>
55286 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55287 unsigned short close_delay;
55288 unsigned short closing_wait; /* time to wait before closing */
55289
55290 - int open_count;
55291 - int blocked_open; /* # of blocked opens */
55292 + local_t open_count;
55293 + local_t blocked_open; /* # of blocked opens */
55294
55295 /* Protect concurent access to :
55296 * o self->open_count
55297 diff -urNp linux-2.6.39.4/include/net/iucv/af_iucv.h linux-2.6.39.4/include/net/iucv/af_iucv.h
55298 --- linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-05-19 00:06:34.000000000 -0400
55299 +++ linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-08-05 19:44:37.000000000 -0400
55300 @@ -87,7 +87,7 @@ struct iucv_sock {
55301 struct iucv_sock_list {
55302 struct hlist_head head;
55303 rwlock_t lock;
55304 - atomic_t autobind_name;
55305 + atomic_unchecked_t autobind_name;
55306 };
55307
55308 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55309 diff -urNp linux-2.6.39.4/include/net/lapb.h linux-2.6.39.4/include/net/lapb.h
55310 --- linux-2.6.39.4/include/net/lapb.h 2011-05-19 00:06:34.000000000 -0400
55311 +++ linux-2.6.39.4/include/net/lapb.h 2011-08-05 20:34:06.000000000 -0400
55312 @@ -95,7 +95,7 @@ struct lapb_cb {
55313 struct sk_buff_head write_queue;
55314 struct sk_buff_head ack_queue;
55315 unsigned char window;
55316 - struct lapb_register_struct callbacks;
55317 + struct lapb_register_struct *callbacks;
55318
55319 /* FRMR control information */
55320 struct lapb_frame frmr_data;
55321 diff -urNp linux-2.6.39.4/include/net/neighbour.h linux-2.6.39.4/include/net/neighbour.h
55322 --- linux-2.6.39.4/include/net/neighbour.h 2011-05-19 00:06:34.000000000 -0400
55323 +++ linux-2.6.39.4/include/net/neighbour.h 2011-08-05 20:34:06.000000000 -0400
55324 @@ -117,7 +117,7 @@ struct neighbour {
55325 };
55326
55327 struct neigh_ops {
55328 - int family;
55329 + const int family;
55330 void (*solicit)(struct neighbour *, struct sk_buff*);
55331 void (*error_report)(struct neighbour *, struct sk_buff*);
55332 int (*output)(struct sk_buff*);
55333 diff -urNp linux-2.6.39.4/include/net/netlink.h linux-2.6.39.4/include/net/netlink.h
55334 --- linux-2.6.39.4/include/net/netlink.h 2011-05-19 00:06:34.000000000 -0400
55335 +++ linux-2.6.39.4/include/net/netlink.h 2011-08-05 19:44:37.000000000 -0400
55336 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55337 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55338 {
55339 if (mark)
55340 - skb_trim(skb, (unsigned char *) mark - skb->data);
55341 + skb_trim(skb, (const unsigned char *) mark - skb->data);
55342 }
55343
55344 /**
55345 diff -urNp linux-2.6.39.4/include/net/netns/ipv4.h linux-2.6.39.4/include/net/netns/ipv4.h
55346 --- linux-2.6.39.4/include/net/netns/ipv4.h 2011-05-19 00:06:34.000000000 -0400
55347 +++ linux-2.6.39.4/include/net/netns/ipv4.h 2011-08-05 19:44:37.000000000 -0400
55348 @@ -54,8 +54,8 @@ struct netns_ipv4 {
55349 int sysctl_rt_cache_rebuild_count;
55350 int current_rt_cache_rebuild_count;
55351
55352 - atomic_t rt_genid;
55353 - atomic_t dev_addr_genid;
55354 + atomic_unchecked_t rt_genid;
55355 + atomic_unchecked_t dev_addr_genid;
55356
55357 #ifdef CONFIG_IP_MROUTE
55358 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55359 diff -urNp linux-2.6.39.4/include/net/sctp/sctp.h linux-2.6.39.4/include/net/sctp/sctp.h
55360 --- linux-2.6.39.4/include/net/sctp/sctp.h 2011-05-19 00:06:34.000000000 -0400
55361 +++ linux-2.6.39.4/include/net/sctp/sctp.h 2011-08-05 19:44:37.000000000 -0400
55362 @@ -316,9 +316,9 @@ do { \
55363
55364 #else /* SCTP_DEBUG */
55365
55366 -#define SCTP_DEBUG_PRINTK(whatever...)
55367 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55368 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55369 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55370 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55371 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55372 #define SCTP_ENABLE_DEBUG
55373 #define SCTP_DISABLE_DEBUG
55374 #define SCTP_ASSERT(expr, str, func)
55375 diff -urNp linux-2.6.39.4/include/net/sock.h linux-2.6.39.4/include/net/sock.h
55376 --- linux-2.6.39.4/include/net/sock.h 2011-05-19 00:06:34.000000000 -0400
55377 +++ linux-2.6.39.4/include/net/sock.h 2011-08-05 19:44:37.000000000 -0400
55378 @@ -277,7 +277,7 @@ struct sock {
55379 #ifdef CONFIG_RPS
55380 __u32 sk_rxhash;
55381 #endif
55382 - atomic_t sk_drops;
55383 + atomic_unchecked_t sk_drops;
55384 int sk_rcvbuf;
55385
55386 struct sk_filter __rcu *sk_filter;
55387 diff -urNp linux-2.6.39.4/include/net/tcp.h linux-2.6.39.4/include/net/tcp.h
55388 --- linux-2.6.39.4/include/net/tcp.h 2011-05-19 00:06:34.000000000 -0400
55389 +++ linux-2.6.39.4/include/net/tcp.h 2011-08-05 20:34:06.000000000 -0400
55390 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55391 struct tcp_seq_afinfo {
55392 char *name;
55393 sa_family_t family;
55394 - struct file_operations seq_fops;
55395 - struct seq_operations seq_ops;
55396 + file_operations_no_const seq_fops;
55397 + seq_operations_no_const seq_ops;
55398 };
55399
55400 struct tcp_iter_state {
55401 diff -urNp linux-2.6.39.4/include/net/udp.h linux-2.6.39.4/include/net/udp.h
55402 --- linux-2.6.39.4/include/net/udp.h 2011-05-19 00:06:34.000000000 -0400
55403 +++ linux-2.6.39.4/include/net/udp.h 2011-08-05 20:34:06.000000000 -0400
55404 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55405 char *name;
55406 sa_family_t family;
55407 struct udp_table *udp_table;
55408 - struct file_operations seq_fops;
55409 - struct seq_operations seq_ops;
55410 + file_operations_no_const seq_fops;
55411 + seq_operations_no_const seq_ops;
55412 };
55413
55414 struct udp_iter_state {
55415 diff -urNp linux-2.6.39.4/include/net/xfrm.h linux-2.6.39.4/include/net/xfrm.h
55416 --- linux-2.6.39.4/include/net/xfrm.h 2011-05-19 00:06:34.000000000 -0400
55417 +++ linux-2.6.39.4/include/net/xfrm.h 2011-08-05 19:44:37.000000000 -0400
55418 @@ -505,7 +505,7 @@ struct xfrm_policy {
55419 struct timer_list timer;
55420
55421 struct flow_cache_object flo;
55422 - atomic_t genid;
55423 + atomic_unchecked_t genid;
55424 u32 priority;
55425 u32 index;
55426 struct xfrm_mark mark;
55427 diff -urNp linux-2.6.39.4/include/rdma/iw_cm.h linux-2.6.39.4/include/rdma/iw_cm.h
55428 --- linux-2.6.39.4/include/rdma/iw_cm.h 2011-05-19 00:06:34.000000000 -0400
55429 +++ linux-2.6.39.4/include/rdma/iw_cm.h 2011-08-05 20:34:06.000000000 -0400
55430 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
55431 int backlog);
55432
55433 int (*destroy_listen)(struct iw_cm_id *cm_id);
55434 -};
55435 +} __no_const;
55436
55437 /**
55438 * iw_create_cm_id - Create an IW CM identifier.
55439 diff -urNp linux-2.6.39.4/include/scsi/libfc.h linux-2.6.39.4/include/scsi/libfc.h
55440 --- linux-2.6.39.4/include/scsi/libfc.h 2011-05-19 00:06:34.000000000 -0400
55441 +++ linux-2.6.39.4/include/scsi/libfc.h 2011-08-05 20:34:06.000000000 -0400
55442 @@ -750,6 +750,7 @@ struct libfc_function_template {
55443 */
55444 void (*disc_stop_final) (struct fc_lport *);
55445 };
55446 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55447
55448 /**
55449 * struct fc_disc - Discovery context
55450 @@ -853,7 +854,7 @@ struct fc_lport {
55451 struct fc_vport *vport;
55452
55453 /* Operational Information */
55454 - struct libfc_function_template tt;
55455 + libfc_function_template_no_const tt;
55456 u8 link_up;
55457 u8 qfull;
55458 enum fc_lport_state state;
55459 diff -urNp linux-2.6.39.4/include/scsi/scsi_device.h linux-2.6.39.4/include/scsi/scsi_device.h
55460 --- linux-2.6.39.4/include/scsi/scsi_device.h 2011-05-19 00:06:34.000000000 -0400
55461 +++ linux-2.6.39.4/include/scsi/scsi_device.h 2011-08-05 19:44:37.000000000 -0400
55462 @@ -161,9 +161,9 @@ struct scsi_device {
55463 unsigned int max_device_blocked; /* what device_blocked counts down from */
55464 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55465
55466 - atomic_t iorequest_cnt;
55467 - atomic_t iodone_cnt;
55468 - atomic_t ioerr_cnt;
55469 + atomic_unchecked_t iorequest_cnt;
55470 + atomic_unchecked_t iodone_cnt;
55471 + atomic_unchecked_t ioerr_cnt;
55472
55473 struct device sdev_gendev,
55474 sdev_dev;
55475 diff -urNp linux-2.6.39.4/include/scsi/scsi_transport_fc.h linux-2.6.39.4/include/scsi/scsi_transport_fc.h
55476 --- linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-05-19 00:06:34.000000000 -0400
55477 +++ linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-08-05 20:34:06.000000000 -0400
55478 @@ -666,9 +666,9 @@ struct fc_function_template {
55479 int (*bsg_timeout)(struct fc_bsg_job *);
55480
55481 /* allocation lengths for host-specific data */
55482 - u32 dd_fcrport_size;
55483 - u32 dd_fcvport_size;
55484 - u32 dd_bsg_size;
55485 + const u32 dd_fcrport_size;
55486 + const u32 dd_fcvport_size;
55487 + const u32 dd_bsg_size;
55488
55489 /*
55490 * The driver sets these to tell the transport class it
55491 @@ -678,39 +678,39 @@ struct fc_function_template {
55492 */
55493
55494 /* remote port fixed attributes */
55495 - unsigned long show_rport_maxframe_size:1;
55496 - unsigned long show_rport_supported_classes:1;
55497 - unsigned long show_rport_dev_loss_tmo:1;
55498 + const unsigned long show_rport_maxframe_size:1;
55499 + const unsigned long show_rport_supported_classes:1;
55500 + const unsigned long show_rport_dev_loss_tmo:1;
55501
55502 /*
55503 * target dynamic attributes
55504 * These should all be "1" if the driver uses the remote port
55505 * add/delete functions (so attributes reflect rport values).
55506 */
55507 - unsigned long show_starget_node_name:1;
55508 - unsigned long show_starget_port_name:1;
55509 - unsigned long show_starget_port_id:1;
55510 + const unsigned long show_starget_node_name:1;
55511 + const unsigned long show_starget_port_name:1;
55512 + const unsigned long show_starget_port_id:1;
55513
55514 /* host fixed attributes */
55515 - unsigned long show_host_node_name:1;
55516 - unsigned long show_host_port_name:1;
55517 - unsigned long show_host_permanent_port_name:1;
55518 - unsigned long show_host_supported_classes:1;
55519 - unsigned long show_host_supported_fc4s:1;
55520 - unsigned long show_host_supported_speeds:1;
55521 - unsigned long show_host_maxframe_size:1;
55522 - unsigned long show_host_serial_number:1;
55523 + const unsigned long show_host_node_name:1;
55524 + const unsigned long show_host_port_name:1;
55525 + const unsigned long show_host_permanent_port_name:1;
55526 + const unsigned long show_host_supported_classes:1;
55527 + const unsigned long show_host_supported_fc4s:1;
55528 + const unsigned long show_host_supported_speeds:1;
55529 + const unsigned long show_host_maxframe_size:1;
55530 + const unsigned long show_host_serial_number:1;
55531 /* host dynamic attributes */
55532 - unsigned long show_host_port_id:1;
55533 - unsigned long show_host_port_type:1;
55534 - unsigned long show_host_port_state:1;
55535 - unsigned long show_host_active_fc4s:1;
55536 - unsigned long show_host_speed:1;
55537 - unsigned long show_host_fabric_name:1;
55538 - unsigned long show_host_symbolic_name:1;
55539 - unsigned long show_host_system_hostname:1;
55540 + const unsigned long show_host_port_id:1;
55541 + const unsigned long show_host_port_type:1;
55542 + const unsigned long show_host_port_state:1;
55543 + const unsigned long show_host_active_fc4s:1;
55544 + const unsigned long show_host_speed:1;
55545 + const unsigned long show_host_fabric_name:1;
55546 + const unsigned long show_host_symbolic_name:1;
55547 + const unsigned long show_host_system_hostname:1;
55548
55549 - unsigned long disable_target_scan:1;
55550 + const unsigned long disable_target_scan:1;
55551 };
55552
55553
55554 diff -urNp linux-2.6.39.4/include/sound/ak4xxx-adda.h linux-2.6.39.4/include/sound/ak4xxx-adda.h
55555 --- linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-05-19 00:06:34.000000000 -0400
55556 +++ linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-08-05 20:34:06.000000000 -0400
55557 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55558 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55559 unsigned char val);
55560 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55561 -};
55562 +} __no_const;
55563
55564 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55565
55566 diff -urNp linux-2.6.39.4/include/sound/hwdep.h linux-2.6.39.4/include/sound/hwdep.h
55567 --- linux-2.6.39.4/include/sound/hwdep.h 2011-05-19 00:06:34.000000000 -0400
55568 +++ linux-2.6.39.4/include/sound/hwdep.h 2011-08-05 20:34:06.000000000 -0400
55569 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55570 struct snd_hwdep_dsp_status *status);
55571 int (*dsp_load)(struct snd_hwdep *hw,
55572 struct snd_hwdep_dsp_image *image);
55573 -};
55574 +} __no_const;
55575
55576 struct snd_hwdep {
55577 struct snd_card *card;
55578 diff -urNp linux-2.6.39.4/include/sound/info.h linux-2.6.39.4/include/sound/info.h
55579 --- linux-2.6.39.4/include/sound/info.h 2011-05-19 00:06:34.000000000 -0400
55580 +++ linux-2.6.39.4/include/sound/info.h 2011-08-05 20:34:06.000000000 -0400
55581 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55582 struct snd_info_buffer *buffer);
55583 void (*write)(struct snd_info_entry *entry,
55584 struct snd_info_buffer *buffer);
55585 -};
55586 +} __no_const;
55587
55588 struct snd_info_entry_ops {
55589 int (*open)(struct snd_info_entry *entry,
55590 diff -urNp linux-2.6.39.4/include/sound/pcm.h linux-2.6.39.4/include/sound/pcm.h
55591 --- linux-2.6.39.4/include/sound/pcm.h 2011-05-19 00:06:34.000000000 -0400
55592 +++ linux-2.6.39.4/include/sound/pcm.h 2011-08-05 20:34:06.000000000 -0400
55593 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55594 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55595 int (*ack)(struct snd_pcm_substream *substream);
55596 };
55597 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55598
55599 /*
55600 *
55601 diff -urNp linux-2.6.39.4/include/sound/sb16_csp.h linux-2.6.39.4/include/sound/sb16_csp.h
55602 --- linux-2.6.39.4/include/sound/sb16_csp.h 2011-05-19 00:06:34.000000000 -0400
55603 +++ linux-2.6.39.4/include/sound/sb16_csp.h 2011-08-05 20:34:06.000000000 -0400
55604 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
55605 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55606 int (*csp_stop) (struct snd_sb_csp * p);
55607 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55608 -};
55609 +} __no_const;
55610
55611 /*
55612 * CSP private data
55613 diff -urNp linux-2.6.39.4/include/sound/soc.h linux-2.6.39.4/include/sound/soc.h
55614 --- linux-2.6.39.4/include/sound/soc.h 2011-05-19 00:06:34.000000000 -0400
55615 +++ linux-2.6.39.4/include/sound/soc.h 2011-08-05 20:34:06.000000000 -0400
55616 @@ -624,7 +624,7 @@ struct snd_soc_platform_driver {
55617 struct snd_soc_dai *);
55618
55619 /* platform stream ops */
55620 - struct snd_pcm_ops *ops;
55621 + struct snd_pcm_ops * const ops;
55622 };
55623
55624 struct snd_soc_platform {
55625 diff -urNp linux-2.6.39.4/include/sound/ymfpci.h linux-2.6.39.4/include/sound/ymfpci.h
55626 --- linux-2.6.39.4/include/sound/ymfpci.h 2011-05-19 00:06:34.000000000 -0400
55627 +++ linux-2.6.39.4/include/sound/ymfpci.h 2011-08-05 19:44:37.000000000 -0400
55628 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55629 spinlock_t reg_lock;
55630 spinlock_t voice_lock;
55631 wait_queue_head_t interrupt_sleep;
55632 - atomic_t interrupt_sleep_count;
55633 + atomic_unchecked_t interrupt_sleep_count;
55634 struct snd_info_entry *proc_entry;
55635 const struct firmware *dsp_microcode;
55636 const struct firmware *controller_microcode;
55637 diff -urNp linux-2.6.39.4/include/target/target_core_base.h linux-2.6.39.4/include/target/target_core_base.h
55638 --- linux-2.6.39.4/include/target/target_core_base.h 2011-06-03 00:04:14.000000000 -0400
55639 +++ linux-2.6.39.4/include/target/target_core_base.h 2011-08-05 20:34:06.000000000 -0400
55640 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55641 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55642 int (*t10_pr_register)(struct se_cmd *);
55643 int (*t10_pr_clear)(struct se_cmd *);
55644 -};
55645 +} __no_const;
55646
55647 struct t10_reservation_template {
55648 /* Reservation effects all target ports */
55649 @@ -432,8 +432,8 @@ struct se_transport_task {
55650 atomic_t t_task_cdbs_left;
55651 atomic_t t_task_cdbs_ex_left;
55652 atomic_t t_task_cdbs_timeout_left;
55653 - atomic_t t_task_cdbs_sent;
55654 - atomic_t t_transport_aborted;
55655 + atomic_unchecked_t t_task_cdbs_sent;
55656 + atomic_unchecked_t t_transport_aborted;
55657 atomic_t t_transport_active;
55658 atomic_t t_transport_complete;
55659 atomic_t t_transport_queue_active;
55660 @@ -774,7 +774,7 @@ struct se_device {
55661 atomic_t active_cmds;
55662 atomic_t simple_cmds;
55663 atomic_t depth_left;
55664 - atomic_t dev_ordered_id;
55665 + atomic_unchecked_t dev_ordered_id;
55666 atomic_t dev_tur_active;
55667 atomic_t execute_tasks;
55668 atomic_t dev_status_thr_count;
55669 diff -urNp linux-2.6.39.4/include/trace/events/irq.h linux-2.6.39.4/include/trace/events/irq.h
55670 --- linux-2.6.39.4/include/trace/events/irq.h 2011-05-19 00:06:34.000000000 -0400
55671 +++ linux-2.6.39.4/include/trace/events/irq.h 2011-08-05 19:44:37.000000000 -0400
55672 @@ -36,7 +36,7 @@ struct softirq_action;
55673 */
55674 TRACE_EVENT(irq_handler_entry,
55675
55676 - TP_PROTO(int irq, struct irqaction *action),
55677 + TP_PROTO(int irq, const struct irqaction *action),
55678
55679 TP_ARGS(irq, action),
55680
55681 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55682 */
55683 TRACE_EVENT(irq_handler_exit,
55684
55685 - TP_PROTO(int irq, struct irqaction *action, int ret),
55686 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55687
55688 TP_ARGS(irq, action, ret),
55689
55690 diff -urNp linux-2.6.39.4/include/video/udlfb.h linux-2.6.39.4/include/video/udlfb.h
55691 --- linux-2.6.39.4/include/video/udlfb.h 2011-05-19 00:06:34.000000000 -0400
55692 +++ linux-2.6.39.4/include/video/udlfb.h 2011-08-05 19:44:37.000000000 -0400
55693 @@ -51,10 +51,10 @@ struct dlfb_data {
55694 int base8;
55695 u32 pseudo_palette[256];
55696 /* blit-only rendering path metrics, exposed through sysfs */
55697 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55698 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55699 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55700 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55701 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55702 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55703 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55704 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55705 };
55706
55707 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55708 diff -urNp linux-2.6.39.4/include/video/uvesafb.h linux-2.6.39.4/include/video/uvesafb.h
55709 --- linux-2.6.39.4/include/video/uvesafb.h 2011-05-19 00:06:34.000000000 -0400
55710 +++ linux-2.6.39.4/include/video/uvesafb.h 2011-08-05 19:44:37.000000000 -0400
55711 @@ -177,6 +177,7 @@ struct uvesafb_par {
55712 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55713 u8 pmi_setpal; /* PMI for palette changes */
55714 u16 *pmi_base; /* protected mode interface location */
55715 + u8 *pmi_code; /* protected mode code location */
55716 void *pmi_start;
55717 void *pmi_pal;
55718 u8 *vbe_state_orig; /*
55719 diff -urNp linux-2.6.39.4/init/do_mounts.c linux-2.6.39.4/init/do_mounts.c
55720 --- linux-2.6.39.4/init/do_mounts.c 2011-05-19 00:06:34.000000000 -0400
55721 +++ linux-2.6.39.4/init/do_mounts.c 2011-08-05 19:44:37.000000000 -0400
55722 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55723
55724 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55725 {
55726 - int err = sys_mount(name, "/root", fs, flags, data);
55727 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55728 if (err)
55729 return err;
55730
55731 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55732 va_start(args, fmt);
55733 vsprintf(buf, fmt, args);
55734 va_end(args);
55735 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55736 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55737 if (fd >= 0) {
55738 sys_ioctl(fd, FDEJECT, 0);
55739 sys_close(fd);
55740 }
55741 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55742 - fd = sys_open("/dev/console", O_RDWR, 0);
55743 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55744 if (fd >= 0) {
55745 sys_ioctl(fd, TCGETS, (long)&termios);
55746 termios.c_lflag &= ~ICANON;
55747 sys_ioctl(fd, TCSETSF, (long)&termios);
55748 - sys_read(fd, &c, 1);
55749 + sys_read(fd, (char __user *)&c, 1);
55750 termios.c_lflag |= ICANON;
55751 sys_ioctl(fd, TCSETSF, (long)&termios);
55752 sys_close(fd);
55753 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55754 mount_root();
55755 out:
55756 devtmpfs_mount("dev");
55757 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55758 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55759 sys_chroot((const char __user __force *)".");
55760 }
55761 diff -urNp linux-2.6.39.4/init/do_mounts.h linux-2.6.39.4/init/do_mounts.h
55762 --- linux-2.6.39.4/init/do_mounts.h 2011-05-19 00:06:34.000000000 -0400
55763 +++ linux-2.6.39.4/init/do_mounts.h 2011-08-05 19:44:37.000000000 -0400
55764 @@ -15,15 +15,15 @@ extern int root_mountflags;
55765
55766 static inline int create_dev(char *name, dev_t dev)
55767 {
55768 - sys_unlink(name);
55769 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55770 + sys_unlink((__force char __user *)name);
55771 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55772 }
55773
55774 #if BITS_PER_LONG == 32
55775 static inline u32 bstat(char *name)
55776 {
55777 struct stat64 stat;
55778 - if (sys_stat64(name, &stat) != 0)
55779 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55780 return 0;
55781 if (!S_ISBLK(stat.st_mode))
55782 return 0;
55783 diff -urNp linux-2.6.39.4/init/do_mounts_initrd.c linux-2.6.39.4/init/do_mounts_initrd.c
55784 --- linux-2.6.39.4/init/do_mounts_initrd.c 2011-05-19 00:06:34.000000000 -0400
55785 +++ linux-2.6.39.4/init/do_mounts_initrd.c 2011-08-05 19:44:37.000000000 -0400
55786 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55787 create_dev("/dev/root.old", Root_RAM0);
55788 /* mount initrd on rootfs' /root */
55789 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55790 - sys_mkdir("/old", 0700);
55791 - root_fd = sys_open("/", 0, 0);
55792 - old_fd = sys_open("/old", 0, 0);
55793 + sys_mkdir((__force const char __user *)"/old", 0700);
55794 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55795 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55796 /* move initrd over / and chdir/chroot in initrd root */
55797 - sys_chdir("/root");
55798 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55799 - sys_chroot(".");
55800 + sys_chdir((__force const char __user *)"/root");
55801 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55802 + sys_chroot((__force const char __user *)".");
55803
55804 /*
55805 * In case that a resume from disk is carried out by linuxrc or one of
55806 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55807
55808 /* move initrd to rootfs' /old */
55809 sys_fchdir(old_fd);
55810 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55811 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55812 /* switch root and cwd back to / of rootfs */
55813 sys_fchdir(root_fd);
55814 - sys_chroot(".");
55815 + sys_chroot((__force const char __user *)".");
55816 sys_close(old_fd);
55817 sys_close(root_fd);
55818
55819 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55820 - sys_chdir("/old");
55821 + sys_chdir((__force const char __user *)"/old");
55822 return;
55823 }
55824
55825 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55826 mount_root();
55827
55828 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55829 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55830 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55831 if (!error)
55832 printk("okay\n");
55833 else {
55834 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55835 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55836 if (error == -ENOENT)
55837 printk("/initrd does not exist. Ignored.\n");
55838 else
55839 printk("failed\n");
55840 printk(KERN_NOTICE "Unmounting old root\n");
55841 - sys_umount("/old", MNT_DETACH);
55842 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55843 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55844 if (fd < 0) {
55845 error = fd;
55846 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55847 * mounted in the normal path.
55848 */
55849 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55850 - sys_unlink("/initrd.image");
55851 + sys_unlink((__force const char __user *)"/initrd.image");
55852 handle_initrd();
55853 return 1;
55854 }
55855 }
55856 - sys_unlink("/initrd.image");
55857 + sys_unlink((__force const char __user *)"/initrd.image");
55858 return 0;
55859 }
55860 diff -urNp linux-2.6.39.4/init/do_mounts_md.c linux-2.6.39.4/init/do_mounts_md.c
55861 --- linux-2.6.39.4/init/do_mounts_md.c 2011-05-19 00:06:34.000000000 -0400
55862 +++ linux-2.6.39.4/init/do_mounts_md.c 2011-08-05 19:44:37.000000000 -0400
55863 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55864 partitioned ? "_d" : "", minor,
55865 md_setup_args[ent].device_names);
55866
55867 - fd = sys_open(name, 0, 0);
55868 + fd = sys_open((__force char __user *)name, 0, 0);
55869 if (fd < 0) {
55870 printk(KERN_ERR "md: open failed - cannot start "
55871 "array %s\n", name);
55872 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55873 * array without it
55874 */
55875 sys_close(fd);
55876 - fd = sys_open(name, 0, 0);
55877 + fd = sys_open((__force char __user *)name, 0, 0);
55878 sys_ioctl(fd, BLKRRPART, 0);
55879 }
55880 sys_close(fd);
55881 diff -urNp linux-2.6.39.4/init/initramfs.c linux-2.6.39.4/init/initramfs.c
55882 --- linux-2.6.39.4/init/initramfs.c 2011-05-19 00:06:34.000000000 -0400
55883 +++ linux-2.6.39.4/init/initramfs.c 2011-08-05 19:44:37.000000000 -0400
55884 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55885 }
55886 }
55887
55888 -static long __init do_utime(char __user *filename, time_t mtime)
55889 +static long __init do_utime(__force char __user *filename, time_t mtime)
55890 {
55891 struct timespec t[2];
55892
55893 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55894 struct dir_entry *de, *tmp;
55895 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55896 list_del(&de->list);
55897 - do_utime(de->name, de->mtime);
55898 + do_utime((__force char __user *)de->name, de->mtime);
55899 kfree(de->name);
55900 kfree(de);
55901 }
55902 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55903 if (nlink >= 2) {
55904 char *old = find_link(major, minor, ino, mode, collected);
55905 if (old)
55906 - return (sys_link(old, collected) < 0) ? -1 : 1;
55907 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55908 }
55909 return 0;
55910 }
55911 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55912 {
55913 struct stat st;
55914
55915 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55916 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55917 if (S_ISDIR(st.st_mode))
55918 - sys_rmdir(path);
55919 + sys_rmdir((__force char __user *)path);
55920 else
55921 - sys_unlink(path);
55922 + sys_unlink((__force char __user *)path);
55923 }
55924 }
55925
55926 @@ -305,7 +305,7 @@ static int __init do_name(void)
55927 int openflags = O_WRONLY|O_CREAT;
55928 if (ml != 1)
55929 openflags |= O_TRUNC;
55930 - wfd = sys_open(collected, openflags, mode);
55931 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55932
55933 if (wfd >= 0) {
55934 sys_fchown(wfd, uid, gid);
55935 @@ -317,17 +317,17 @@ static int __init do_name(void)
55936 }
55937 }
55938 } else if (S_ISDIR(mode)) {
55939 - sys_mkdir(collected, mode);
55940 - sys_chown(collected, uid, gid);
55941 - sys_chmod(collected, mode);
55942 + sys_mkdir((__force char __user *)collected, mode);
55943 + sys_chown((__force char __user *)collected, uid, gid);
55944 + sys_chmod((__force char __user *)collected, mode);
55945 dir_add(collected, mtime);
55946 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55947 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55948 if (maybe_link() == 0) {
55949 - sys_mknod(collected, mode, rdev);
55950 - sys_chown(collected, uid, gid);
55951 - sys_chmod(collected, mode);
55952 - do_utime(collected, mtime);
55953 + sys_mknod((__force char __user *)collected, mode, rdev);
55954 + sys_chown((__force char __user *)collected, uid, gid);
55955 + sys_chmod((__force char __user *)collected, mode);
55956 + do_utime((__force char __user *)collected, mtime);
55957 }
55958 }
55959 return 0;
55960 @@ -336,15 +336,15 @@ static int __init do_name(void)
55961 static int __init do_copy(void)
55962 {
55963 if (count >= body_len) {
55964 - sys_write(wfd, victim, body_len);
55965 + sys_write(wfd, (__force char __user *)victim, body_len);
55966 sys_close(wfd);
55967 - do_utime(vcollected, mtime);
55968 + do_utime((__force char __user *)vcollected, mtime);
55969 kfree(vcollected);
55970 eat(body_len);
55971 state = SkipIt;
55972 return 0;
55973 } else {
55974 - sys_write(wfd, victim, count);
55975 + sys_write(wfd, (__force char __user *)victim, count);
55976 body_len -= count;
55977 eat(count);
55978 return 1;
55979 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55980 {
55981 collected[N_ALIGN(name_len) + body_len] = '\0';
55982 clean_path(collected, 0);
55983 - sys_symlink(collected + N_ALIGN(name_len), collected);
55984 - sys_lchown(collected, uid, gid);
55985 - do_utime(collected, mtime);
55986 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55987 + sys_lchown((__force char __user *)collected, uid, gid);
55988 + do_utime((__force char __user *)collected, mtime);
55989 state = SkipIt;
55990 next_state = Reset;
55991 return 0;
55992 diff -urNp linux-2.6.39.4/init/Kconfig linux-2.6.39.4/init/Kconfig
55993 --- linux-2.6.39.4/init/Kconfig 2011-05-19 00:06:34.000000000 -0400
55994 +++ linux-2.6.39.4/init/Kconfig 2011-08-05 19:44:37.000000000 -0400
55995 @@ -1202,7 +1202,7 @@ config SLUB_DEBUG
55996
55997 config COMPAT_BRK
55998 bool "Disable heap randomization"
55999 - default y
56000 + default n
56001 help
56002 Randomizing heap placement makes heap exploits harder, but it
56003 also breaks ancient binaries (including anything libc5 based).
56004 diff -urNp linux-2.6.39.4/init/main.c linux-2.6.39.4/init/main.c
56005 --- linux-2.6.39.4/init/main.c 2011-06-03 00:04:14.000000000 -0400
56006 +++ linux-2.6.39.4/init/main.c 2011-08-05 20:34:06.000000000 -0400
56007 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
56008 extern void tc_init(void);
56009 #endif
56010
56011 +extern void grsecurity_init(void);
56012 +
56013 /*
56014 * Debug helper: via this flag we know that we are in 'early bootup code'
56015 * where only the boot processor is running with IRQ disabled. This means
56016 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
56017
56018 __setup("reset_devices", set_reset_devices);
56019
56020 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
56021 +extern char pax_enter_kernel_user[];
56022 +extern char pax_exit_kernel_user[];
56023 +extern pgdval_t clone_pgd_mask;
56024 +#endif
56025 +
56026 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
56027 +static int __init setup_pax_nouderef(char *str)
56028 +{
56029 +#ifdef CONFIG_X86_32
56030 + unsigned int cpu;
56031 + struct desc_struct *gdt;
56032 +
56033 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
56034 + gdt = get_cpu_gdt_table(cpu);
56035 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
56036 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
56037 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
56038 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
56039 + }
56040 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
56041 +#else
56042 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
56043 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
56044 + clone_pgd_mask = ~(pgdval_t)0UL;
56045 +#endif
56046 +
56047 + return 0;
56048 +}
56049 +early_param("pax_nouderef", setup_pax_nouderef);
56050 +#endif
56051 +
56052 +#ifdef CONFIG_PAX_SOFTMODE
56053 +int pax_softmode;
56054 +
56055 +static int __init setup_pax_softmode(char *str)
56056 +{
56057 + get_option(&str, &pax_softmode);
56058 + return 1;
56059 +}
56060 +__setup("pax_softmode=", setup_pax_softmode);
56061 +#endif
56062 +
56063 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
56064 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
56065 static const char *panic_later, *panic_param;
56066 @@ -663,6 +708,7 @@ int __init_or_module do_one_initcall(ini
56067 {
56068 int count = preempt_count();
56069 int ret;
56070 + const char *msg1 = "", *msg2 = "";
56071
56072 if (initcall_debug)
56073 ret = do_one_initcall_debug(fn);
56074 @@ -675,15 +721,15 @@ int __init_or_module do_one_initcall(ini
56075 sprintf(msgbuf, "error code %d ", ret);
56076
56077 if (preempt_count() != count) {
56078 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
56079 + msg1 = " preemption imbalance";
56080 preempt_count() = count;
56081 }
56082 if (irqs_disabled()) {
56083 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
56084 + msg2 = " disabled interrupts";
56085 local_irq_enable();
56086 }
56087 - if (msgbuf[0]) {
56088 - printk("initcall %pF returned with %s\n", fn, msgbuf);
56089 + if (msgbuf[0] || *msg1 || *msg2) {
56090 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
56091 }
56092
56093 return ret;
56094 @@ -801,7 +847,7 @@ static int __init kernel_init(void * unu
56095 do_basic_setup();
56096
56097 /* Open the /dev/console on the rootfs, this should never fail */
56098 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
56099 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
56100 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
56101
56102 (void) sys_dup(0);
56103 @@ -814,11 +860,13 @@ static int __init kernel_init(void * unu
56104 if (!ramdisk_execute_command)
56105 ramdisk_execute_command = "/init";
56106
56107 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
56108 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
56109 ramdisk_execute_command = NULL;
56110 prepare_namespace();
56111 }
56112
56113 + grsecurity_init();
56114 +
56115 /*
56116 * Ok, we have completed the initial bootup, and
56117 * we're essentially up and running. Get rid of the
56118 diff -urNp linux-2.6.39.4/ipc/mqueue.c linux-2.6.39.4/ipc/mqueue.c
56119 --- linux-2.6.39.4/ipc/mqueue.c 2011-05-19 00:06:34.000000000 -0400
56120 +++ linux-2.6.39.4/ipc/mqueue.c 2011-08-05 19:44:37.000000000 -0400
56121 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
56122 mq_bytes = (mq_msg_tblsz +
56123 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
56124
56125 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
56126 spin_lock(&mq_lock);
56127 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
56128 u->mq_bytes + mq_bytes >
56129 diff -urNp linux-2.6.39.4/ipc/msg.c linux-2.6.39.4/ipc/msg.c
56130 --- linux-2.6.39.4/ipc/msg.c 2011-05-19 00:06:34.000000000 -0400
56131 +++ linux-2.6.39.4/ipc/msg.c 2011-08-05 20:34:06.000000000 -0400
56132 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
56133 return security_msg_queue_associate(msq, msgflg);
56134 }
56135
56136 +static struct ipc_ops msg_ops = {
56137 + .getnew = newque,
56138 + .associate = msg_security,
56139 + .more_checks = NULL
56140 +};
56141 +
56142 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
56143 {
56144 struct ipc_namespace *ns;
56145 - struct ipc_ops msg_ops;
56146 struct ipc_params msg_params;
56147
56148 ns = current->nsproxy->ipc_ns;
56149
56150 - msg_ops.getnew = newque;
56151 - msg_ops.associate = msg_security;
56152 - msg_ops.more_checks = NULL;
56153 -
56154 msg_params.key = key;
56155 msg_params.flg = msgflg;
56156
56157 diff -urNp linux-2.6.39.4/ipc/sem.c linux-2.6.39.4/ipc/sem.c
56158 --- linux-2.6.39.4/ipc/sem.c 2011-05-19 00:06:34.000000000 -0400
56159 +++ linux-2.6.39.4/ipc/sem.c 2011-08-05 20:34:06.000000000 -0400
56160 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
56161 return 0;
56162 }
56163
56164 +static struct ipc_ops sem_ops = {
56165 + .getnew = newary,
56166 + .associate = sem_security,
56167 + .more_checks = sem_more_checks
56168 +};
56169 +
56170 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
56171 {
56172 struct ipc_namespace *ns;
56173 - struct ipc_ops sem_ops;
56174 struct ipc_params sem_params;
56175
56176 ns = current->nsproxy->ipc_ns;
56177 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
56178 if (nsems < 0 || nsems > ns->sc_semmsl)
56179 return -EINVAL;
56180
56181 - sem_ops.getnew = newary;
56182 - sem_ops.associate = sem_security;
56183 - sem_ops.more_checks = sem_more_checks;
56184 -
56185 sem_params.key = key;
56186 sem_params.flg = semflg;
56187 sem_params.u.nsems = nsems;
56188 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
56189 int nsems;
56190 struct list_head tasks;
56191
56192 + pax_track_stack();
56193 +
56194 sma = sem_lock_check(ns, semid);
56195 if (IS_ERR(sma))
56196 return PTR_ERR(sma);
56197 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
56198 struct ipc_namespace *ns;
56199 struct list_head tasks;
56200
56201 + pax_track_stack();
56202 +
56203 ns = current->nsproxy->ipc_ns;
56204
56205 if (nsops < 1 || semid < 0)
56206 diff -urNp linux-2.6.39.4/ipc/shm.c linux-2.6.39.4/ipc/shm.c
56207 --- linux-2.6.39.4/ipc/shm.c 2011-05-19 00:06:34.000000000 -0400
56208 +++ linux-2.6.39.4/ipc/shm.c 2011-08-05 20:34:06.000000000 -0400
56209 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
56210 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
56211 #endif
56212
56213 +#ifdef CONFIG_GRKERNSEC
56214 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56215 + const time_t shm_createtime, const uid_t cuid,
56216 + const int shmid);
56217 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56218 + const time_t shm_createtime);
56219 +#endif
56220 +
56221 void shm_init_ns(struct ipc_namespace *ns)
56222 {
56223 ns->shm_ctlmax = SHMMAX;
56224 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
56225 shp->shm_lprid = 0;
56226 shp->shm_atim = shp->shm_dtim = 0;
56227 shp->shm_ctim = get_seconds();
56228 +#ifdef CONFIG_GRKERNSEC
56229 + {
56230 + struct timespec timeval;
56231 + do_posix_clock_monotonic_gettime(&timeval);
56232 +
56233 + shp->shm_createtime = timeval.tv_sec;
56234 + }
56235 +#endif
56236 shp->shm_segsz = size;
56237 shp->shm_nattch = 0;
56238 shp->shm_file = file;
56239 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
56240 return 0;
56241 }
56242
56243 +static struct ipc_ops shm_ops = {
56244 + .getnew = newseg,
56245 + .associate = shm_security,
56246 + .more_checks = shm_more_checks
56247 +};
56248 +
56249 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
56250 {
56251 struct ipc_namespace *ns;
56252 - struct ipc_ops shm_ops;
56253 struct ipc_params shm_params;
56254
56255 ns = current->nsproxy->ipc_ns;
56256
56257 - shm_ops.getnew = newseg;
56258 - shm_ops.associate = shm_security;
56259 - shm_ops.more_checks = shm_more_checks;
56260 -
56261 shm_params.key = key;
56262 shm_params.flg = shmflg;
56263 shm_params.u.size = size;
56264 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
56265 case SHM_LOCK:
56266 case SHM_UNLOCK:
56267 {
56268 - struct file *uninitialized_var(shm_file);
56269 -
56270 lru_add_drain_all(); /* drain pagevecs to lru lists */
56271
56272 shp = shm_lock_check(ns, shmid);
56273 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56274 if (err)
56275 goto out_unlock;
56276
56277 +#ifdef CONFIG_GRKERNSEC
56278 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56279 + shp->shm_perm.cuid, shmid) ||
56280 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56281 + err = -EACCES;
56282 + goto out_unlock;
56283 + }
56284 +#endif
56285 +
56286 path = shp->shm_file->f_path;
56287 path_get(&path);
56288 shp->shm_nattch++;
56289 +#ifdef CONFIG_GRKERNSEC
56290 + shp->shm_lapid = current->pid;
56291 +#endif
56292 size = i_size_read(path.dentry->d_inode);
56293 shm_unlock(shp);
56294
56295 diff -urNp linux-2.6.39.4/kernel/acct.c linux-2.6.39.4/kernel/acct.c
56296 --- linux-2.6.39.4/kernel/acct.c 2011-05-19 00:06:34.000000000 -0400
56297 +++ linux-2.6.39.4/kernel/acct.c 2011-08-05 19:44:37.000000000 -0400
56298 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56299 */
56300 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56301 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56302 - file->f_op->write(file, (char *)&ac,
56303 + file->f_op->write(file, (__force char __user *)&ac,
56304 sizeof(acct_t), &file->f_pos);
56305 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56306 set_fs(fs);
56307 diff -urNp linux-2.6.39.4/kernel/audit.c linux-2.6.39.4/kernel/audit.c
56308 --- linux-2.6.39.4/kernel/audit.c 2011-05-19 00:06:34.000000000 -0400
56309 +++ linux-2.6.39.4/kernel/audit.c 2011-08-05 19:44:37.000000000 -0400
56310 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56311 3) suppressed due to audit_rate_limit
56312 4) suppressed due to audit_backlog_limit
56313 */
56314 -static atomic_t audit_lost = ATOMIC_INIT(0);
56315 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56316
56317 /* The netlink socket. */
56318 static struct sock *audit_sock;
56319 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56320 unsigned long now;
56321 int print;
56322
56323 - atomic_inc(&audit_lost);
56324 + atomic_inc_unchecked(&audit_lost);
56325
56326 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56327
56328 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56329 printk(KERN_WARNING
56330 "audit: audit_lost=%d audit_rate_limit=%d "
56331 "audit_backlog_limit=%d\n",
56332 - atomic_read(&audit_lost),
56333 + atomic_read_unchecked(&audit_lost),
56334 audit_rate_limit,
56335 audit_backlog_limit);
56336 audit_panic(message);
56337 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56338 status_set.pid = audit_pid;
56339 status_set.rate_limit = audit_rate_limit;
56340 status_set.backlog_limit = audit_backlog_limit;
56341 - status_set.lost = atomic_read(&audit_lost);
56342 + status_set.lost = atomic_read_unchecked(&audit_lost);
56343 status_set.backlog = skb_queue_len(&audit_skb_queue);
56344 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56345 &status_set, sizeof(status_set));
56346 diff -urNp linux-2.6.39.4/kernel/auditsc.c linux-2.6.39.4/kernel/auditsc.c
56347 --- linux-2.6.39.4/kernel/auditsc.c 2011-05-19 00:06:34.000000000 -0400
56348 +++ linux-2.6.39.4/kernel/auditsc.c 2011-08-05 19:44:37.000000000 -0400
56349 @@ -2111,7 +2111,7 @@ int auditsc_get_stamp(struct audit_conte
56350 }
56351
56352 /* global counter which is incremented every time something logs in */
56353 -static atomic_t session_id = ATOMIC_INIT(0);
56354 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56355
56356 /**
56357 * audit_set_loginuid - set a task's audit_context loginuid
56358 @@ -2124,7 +2124,7 @@ static atomic_t session_id = ATOMIC_INIT
56359 */
56360 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56361 {
56362 - unsigned int sessionid = atomic_inc_return(&session_id);
56363 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56364 struct audit_context *context = task->audit_context;
56365
56366 if (context && context->in_syscall) {
56367 diff -urNp linux-2.6.39.4/kernel/capability.c linux-2.6.39.4/kernel/capability.c
56368 --- linux-2.6.39.4/kernel/capability.c 2011-05-19 00:06:34.000000000 -0400
56369 +++ linux-2.6.39.4/kernel/capability.c 2011-08-05 19:44:37.000000000 -0400
56370 @@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56371 * before modification is attempted and the application
56372 * fails.
56373 */
56374 + if (tocopy > ARRAY_SIZE(kdata))
56375 + return -EFAULT;
56376 +
56377 if (copy_to_user(dataptr, kdata, tocopy
56378 * sizeof(struct __user_cap_data_struct))) {
56379 return -EFAULT;
56380 @@ -378,7 +381,7 @@ bool ns_capable(struct user_namespace *n
56381 BUG();
56382 }
56383
56384 - if (security_capable(ns, current_cred(), cap) == 0) {
56385 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56386 current->flags |= PF_SUPERPRIV;
56387 return true;
56388 }
56389 @@ -386,6 +389,27 @@ bool ns_capable(struct user_namespace *n
56390 }
56391 EXPORT_SYMBOL(ns_capable);
56392
56393 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
56394 +{
56395 + if (unlikely(!cap_valid(cap))) {
56396 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56397 + BUG();
56398 + }
56399 +
56400 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56401 + current->flags |= PF_SUPERPRIV;
56402 + return true;
56403 + }
56404 + return false;
56405 +}
56406 +EXPORT_SYMBOL(ns_capable_nolog);
56407 +
56408 +bool capable_nolog(int cap)
56409 +{
56410 + return ns_capable_nolog(&init_user_ns, cap);
56411 +}
56412 +EXPORT_SYMBOL(capable_nolog);
56413 +
56414 /**
56415 * task_ns_capable - Determine whether current task has a superior
56416 * capability targeted at a specific task's user namespace.
56417 @@ -400,6 +424,12 @@ bool task_ns_capable(struct task_struct
56418 }
56419 EXPORT_SYMBOL(task_ns_capable);
56420
56421 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
56422 +{
56423 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56424 +}
56425 +EXPORT_SYMBOL(task_ns_capable_nolog);
56426 +
56427 /**
56428 * nsown_capable - Check superior capability to one's own user_ns
56429 * @cap: The capability in question
56430 diff -urNp linux-2.6.39.4/kernel/cgroup.c linux-2.6.39.4/kernel/cgroup.c
56431 --- linux-2.6.39.4/kernel/cgroup.c 2011-05-19 00:06:34.000000000 -0400
56432 +++ linux-2.6.39.4/kernel/cgroup.c 2011-08-05 19:44:37.000000000 -0400
56433 @@ -598,6 +598,8 @@ static struct css_set *find_css_set(
56434 struct hlist_head *hhead;
56435 struct cg_cgroup_link *link;
56436
56437 + pax_track_stack();
56438 +
56439 /* First see if we already have a cgroup group that matches
56440 * the desired set */
56441 read_lock(&css_set_lock);
56442 diff -urNp linux-2.6.39.4/kernel/compat.c linux-2.6.39.4/kernel/compat.c
56443 --- linux-2.6.39.4/kernel/compat.c 2011-05-19 00:06:34.000000000 -0400
56444 +++ linux-2.6.39.4/kernel/compat.c 2011-08-05 19:44:37.000000000 -0400
56445 @@ -13,6 +13,7 @@
56446
56447 #include <linux/linkage.h>
56448 #include <linux/compat.h>
56449 +#include <linux/module.h>
56450 #include <linux/errno.h>
56451 #include <linux/time.h>
56452 #include <linux/signal.h>
56453 diff -urNp linux-2.6.39.4/kernel/configs.c linux-2.6.39.4/kernel/configs.c
56454 --- linux-2.6.39.4/kernel/configs.c 2011-05-19 00:06:34.000000000 -0400
56455 +++ linux-2.6.39.4/kernel/configs.c 2011-08-05 19:44:37.000000000 -0400
56456 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56457 struct proc_dir_entry *entry;
56458
56459 /* create the current config file */
56460 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56461 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56462 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56463 + &ikconfig_file_ops);
56464 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56465 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56466 + &ikconfig_file_ops);
56467 +#endif
56468 +#else
56469 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56470 &ikconfig_file_ops);
56471 +#endif
56472 +
56473 if (!entry)
56474 return -ENOMEM;
56475
56476 diff -urNp linux-2.6.39.4/kernel/cred.c linux-2.6.39.4/kernel/cred.c
56477 --- linux-2.6.39.4/kernel/cred.c 2011-05-19 00:06:34.000000000 -0400
56478 +++ linux-2.6.39.4/kernel/cred.c 2011-08-05 19:44:37.000000000 -0400
56479 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56480 */
56481 void __put_cred(struct cred *cred)
56482 {
56483 + pax_track_stack();
56484 +
56485 kdebug("__put_cred(%p{%d,%d})", cred,
56486 atomic_read(&cred->usage),
56487 read_cred_subscribers(cred));
56488 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56489 {
56490 struct cred *cred;
56491
56492 + pax_track_stack();
56493 +
56494 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56495 atomic_read(&tsk->cred->usage),
56496 read_cred_subscribers(tsk->cred));
56497 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56498 {
56499 const struct cred *cred;
56500
56501 + pax_track_stack();
56502 +
56503 rcu_read_lock();
56504
56505 do {
56506 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56507 {
56508 struct cred *new;
56509
56510 + pax_track_stack();
56511 +
56512 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56513 if (!new)
56514 return NULL;
56515 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56516 const struct cred *old;
56517 struct cred *new;
56518
56519 + pax_track_stack();
56520 +
56521 validate_process_creds();
56522
56523 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56524 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56525 struct thread_group_cred *tgcred = NULL;
56526 struct cred *new;
56527
56528 + pax_track_stack();
56529 +
56530 #ifdef CONFIG_KEYS
56531 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56532 if (!tgcred)
56533 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56534 struct cred *new;
56535 int ret;
56536
56537 + pax_track_stack();
56538 +
56539 if (
56540 #ifdef CONFIG_KEYS
56541 !p->cred->thread_keyring &&
56542 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56543 struct task_struct *task = current;
56544 const struct cred *old = task->real_cred;
56545
56546 + pax_track_stack();
56547 +
56548 kdebug("commit_creds(%p{%d,%d})", new,
56549 atomic_read(&new->usage),
56550 read_cred_subscribers(new));
56551 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56552
56553 get_cred(new); /* we will require a ref for the subj creds too */
56554
56555 + gr_set_role_label(task, new->uid, new->gid);
56556 +
56557 /* dumpability changes */
56558 if (old->euid != new->euid ||
56559 old->egid != new->egid ||
56560 @@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56561 */
56562 void abort_creds(struct cred *new)
56563 {
56564 + pax_track_stack();
56565 +
56566 kdebug("abort_creds(%p{%d,%d})", new,
56567 atomic_read(&new->usage),
56568 read_cred_subscribers(new));
56569 @@ -574,6 +594,8 @@ const struct cred *override_creds(const
56570 {
56571 const struct cred *old = current->cred;
56572
56573 + pax_track_stack();
56574 +
56575 kdebug("override_creds(%p{%d,%d})", new,
56576 atomic_read(&new->usage),
56577 read_cred_subscribers(new));
56578 @@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56579 {
56580 const struct cred *override = current->cred;
56581
56582 + pax_track_stack();
56583 +
56584 kdebug("revert_creds(%p{%d,%d})", old,
56585 atomic_read(&old->usage),
56586 read_cred_subscribers(old));
56587 @@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56588 const struct cred *old;
56589 struct cred *new;
56590
56591 + pax_track_stack();
56592 +
56593 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56594 if (!new)
56595 return NULL;
56596 @@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56597 */
56598 int set_security_override(struct cred *new, u32 secid)
56599 {
56600 + pax_track_stack();
56601 +
56602 return security_kernel_act_as(new, secid);
56603 }
56604 EXPORT_SYMBOL(set_security_override);
56605 @@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56606 u32 secid;
56607 int ret;
56608
56609 + pax_track_stack();
56610 +
56611 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56612 if (ret < 0)
56613 return ret;
56614 diff -urNp linux-2.6.39.4/kernel/debug/debug_core.c linux-2.6.39.4/kernel/debug/debug_core.c
56615 --- linux-2.6.39.4/kernel/debug/debug_core.c 2011-05-19 00:06:34.000000000 -0400
56616 +++ linux-2.6.39.4/kernel/debug/debug_core.c 2011-08-05 20:34:06.000000000 -0400
56617 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56618 */
56619 static atomic_t masters_in_kgdb;
56620 static atomic_t slaves_in_kgdb;
56621 -static atomic_t kgdb_break_tasklet_var;
56622 +static atomic_unchecked_t kgdb_break_tasklet_var;
56623 atomic_t kgdb_setting_breakpoint;
56624
56625 struct task_struct *kgdb_usethread;
56626 @@ -129,7 +129,7 @@ int kgdb_single_step;
56627 static pid_t kgdb_sstep_pid;
56628
56629 /* to keep track of the CPU which is doing the single stepping*/
56630 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56631 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56632
56633 /*
56634 * If you are debugging a problem where roundup (the collection of
56635 @@ -542,7 +542,7 @@ return_normal:
56636 * kernel will only try for the value of sstep_tries before
56637 * giving up and continuing on.
56638 */
56639 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56640 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56641 (kgdb_info[cpu].task &&
56642 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56643 atomic_set(&kgdb_active, -1);
56644 @@ -636,8 +636,8 @@ cpu_master_loop:
56645 }
56646
56647 kgdb_restore:
56648 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56649 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56650 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56651 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56652 if (kgdb_info[sstep_cpu].task)
56653 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56654 else
56655 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56656 static void kgdb_tasklet_bpt(unsigned long ing)
56657 {
56658 kgdb_breakpoint();
56659 - atomic_set(&kgdb_break_tasklet_var, 0);
56660 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56661 }
56662
56663 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56664
56665 void kgdb_schedule_breakpoint(void)
56666 {
56667 - if (atomic_read(&kgdb_break_tasklet_var) ||
56668 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56669 atomic_read(&kgdb_active) != -1 ||
56670 atomic_read(&kgdb_setting_breakpoint))
56671 return;
56672 - atomic_inc(&kgdb_break_tasklet_var);
56673 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56674 tasklet_schedule(&kgdb_tasklet_breakpoint);
56675 }
56676 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56677 diff -urNp linux-2.6.39.4/kernel/debug/kdb/kdb_main.c linux-2.6.39.4/kernel/debug/kdb/kdb_main.c
56678 --- linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-05-19 00:06:34.000000000 -0400
56679 +++ linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-08-05 19:44:37.000000000 -0400
56680 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56681 list_for_each_entry(mod, kdb_modules, list) {
56682
56683 kdb_printf("%-20s%8u 0x%p ", mod->name,
56684 - mod->core_size, (void *)mod);
56685 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56686 #ifdef CONFIG_MODULE_UNLOAD
56687 kdb_printf("%4d ", module_refcount(mod));
56688 #endif
56689 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56690 kdb_printf(" (Loading)");
56691 else
56692 kdb_printf(" (Live)");
56693 - kdb_printf(" 0x%p", mod->module_core);
56694 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56695
56696 #ifdef CONFIG_MODULE_UNLOAD
56697 {
56698 diff -urNp linux-2.6.39.4/kernel/exit.c linux-2.6.39.4/kernel/exit.c
56699 --- linux-2.6.39.4/kernel/exit.c 2011-05-19 00:06:34.000000000 -0400
56700 +++ linux-2.6.39.4/kernel/exit.c 2011-08-17 19:20:17.000000000 -0400
56701 @@ -57,6 +57,10 @@
56702 #include <asm/pgtable.h>
56703 #include <asm/mmu_context.h>
56704
56705 +#ifdef CONFIG_GRKERNSEC
56706 +extern rwlock_t grsec_exec_file_lock;
56707 +#endif
56708 +
56709 static void exit_mm(struct task_struct * tsk);
56710
56711 static void __unhash_process(struct task_struct *p, bool group_dead)
56712 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56713 struct task_struct *leader;
56714 int zap_leader;
56715 repeat:
56716 +#ifdef CONFIG_NET
56717 + gr_del_task_from_ip_table(p);
56718 +#endif
56719 +
56720 tracehook_prepare_release_task(p);
56721 /* don't need to get the RCU readlock here - the process is dead and
56722 * can't be modifying its own credentials. But shut RCU-lockdep up */
56723 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56724 {
56725 write_lock_irq(&tasklist_lock);
56726
56727 +#ifdef CONFIG_GRKERNSEC
56728 + write_lock(&grsec_exec_file_lock);
56729 + if (current->exec_file) {
56730 + fput(current->exec_file);
56731 + current->exec_file = NULL;
56732 + }
56733 + write_unlock(&grsec_exec_file_lock);
56734 +#endif
56735 +
56736 ptrace_unlink(current);
56737 /* Reparent to init */
56738 current->real_parent = current->parent = kthreadd_task;
56739 list_move_tail(&current->sibling, &current->real_parent->children);
56740
56741 + gr_set_kernel_label(current);
56742 +
56743 /* Set the exit signal to SIGCHLD so we signal init on exit */
56744 current->exit_signal = SIGCHLD;
56745
56746 @@ -394,7 +413,7 @@ int allow_signal(int sig)
56747 * know it'll be handled, so that they don't get converted to
56748 * SIGKILL or just silently dropped.
56749 */
56750 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56751 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56752 recalc_sigpending();
56753 spin_unlock_irq(&current->sighand->siglock);
56754 return 0;
56755 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56756 vsnprintf(current->comm, sizeof(current->comm), name, args);
56757 va_end(args);
56758
56759 +#ifdef CONFIG_GRKERNSEC
56760 + write_lock(&grsec_exec_file_lock);
56761 + if (current->exec_file) {
56762 + fput(current->exec_file);
56763 + current->exec_file = NULL;
56764 + }
56765 + write_unlock(&grsec_exec_file_lock);
56766 +#endif
56767 +
56768 + gr_set_kernel_label(current);
56769 +
56770 /*
56771 * If we were started as result of loading a module, close all of the
56772 * user space pages. We don't need them, and if we didn't close them
56773 @@ -905,15 +935,8 @@ NORET_TYPE void do_exit(long code)
56774 struct task_struct *tsk = current;
56775 int group_dead;
56776
56777 - profile_task_exit(tsk);
56778 -
56779 - WARN_ON(atomic_read(&tsk->fs_excl));
56780 - WARN_ON(blk_needs_flush_plug(tsk));
56781 -
56782 if (unlikely(in_interrupt()))
56783 panic("Aiee, killing interrupt handler!");
56784 - if (unlikely(!tsk->pid))
56785 - panic("Attempted to kill the idle task!");
56786
56787 /*
56788 * If do_exit is called because this processes oopsed, it's possible
56789 @@ -924,6 +947,14 @@ NORET_TYPE void do_exit(long code)
56790 */
56791 set_fs(USER_DS);
56792
56793 + profile_task_exit(tsk);
56794 +
56795 + WARN_ON(atomic_read(&tsk->fs_excl));
56796 + WARN_ON(blk_needs_flush_plug(tsk));
56797 +
56798 + if (unlikely(!tsk->pid))
56799 + panic("Attempted to kill the idle task!");
56800 +
56801 tracehook_report_exit(&code);
56802
56803 validate_creds_for_do_exit(tsk);
56804 @@ -984,6 +1015,9 @@ NORET_TYPE void do_exit(long code)
56805 tsk->exit_code = code;
56806 taskstats_exit(tsk, group_dead);
56807
56808 + gr_acl_handle_psacct(tsk, code);
56809 + gr_acl_handle_exit();
56810 +
56811 exit_mm(tsk);
56812
56813 if (group_dead)
56814 diff -urNp linux-2.6.39.4/kernel/fork.c linux-2.6.39.4/kernel/fork.c
56815 --- linux-2.6.39.4/kernel/fork.c 2011-05-19 00:06:34.000000000 -0400
56816 +++ linux-2.6.39.4/kernel/fork.c 2011-08-05 19:44:37.000000000 -0400
56817 @@ -287,7 +287,7 @@ static struct task_struct *dup_task_stru
56818 *stackend = STACK_END_MAGIC; /* for overflow detection */
56819
56820 #ifdef CONFIG_CC_STACKPROTECTOR
56821 - tsk->stack_canary = get_random_int();
56822 + tsk->stack_canary = pax_get_random_long();
56823 #endif
56824
56825 /* One for us, one for whoever does the "release_task()" (usually parent) */
56826 @@ -309,13 +309,78 @@ out:
56827 }
56828
56829 #ifdef CONFIG_MMU
56830 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56831 +{
56832 + struct vm_area_struct *tmp;
56833 + unsigned long charge;
56834 + struct mempolicy *pol;
56835 + struct file *file;
56836 +
56837 + charge = 0;
56838 + if (mpnt->vm_flags & VM_ACCOUNT) {
56839 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56840 + if (security_vm_enough_memory(len))
56841 + goto fail_nomem;
56842 + charge = len;
56843 + }
56844 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56845 + if (!tmp)
56846 + goto fail_nomem;
56847 + *tmp = *mpnt;
56848 + tmp->vm_mm = mm;
56849 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56850 + pol = mpol_dup(vma_policy(mpnt));
56851 + if (IS_ERR(pol))
56852 + goto fail_nomem_policy;
56853 + vma_set_policy(tmp, pol);
56854 + if (anon_vma_fork(tmp, mpnt))
56855 + goto fail_nomem_anon_vma_fork;
56856 + tmp->vm_flags &= ~VM_LOCKED;
56857 + tmp->vm_next = tmp->vm_prev = NULL;
56858 + tmp->vm_mirror = NULL;
56859 + file = tmp->vm_file;
56860 + if (file) {
56861 + struct inode *inode = file->f_path.dentry->d_inode;
56862 + struct address_space *mapping = file->f_mapping;
56863 +
56864 + get_file(file);
56865 + if (tmp->vm_flags & VM_DENYWRITE)
56866 + atomic_dec(&inode->i_writecount);
56867 + spin_lock(&mapping->i_mmap_lock);
56868 + if (tmp->vm_flags & VM_SHARED)
56869 + mapping->i_mmap_writable++;
56870 + tmp->vm_truncate_count = mpnt->vm_truncate_count;
56871 + flush_dcache_mmap_lock(mapping);
56872 + /* insert tmp into the share list, just after mpnt */
56873 + vma_prio_tree_add(tmp, mpnt);
56874 + flush_dcache_mmap_unlock(mapping);
56875 + spin_unlock(&mapping->i_mmap_lock);
56876 + }
56877 +
56878 + /*
56879 + * Clear hugetlb-related page reserves for children. This only
56880 + * affects MAP_PRIVATE mappings. Faults generated by the child
56881 + * are not guaranteed to succeed, even if read-only
56882 + */
56883 + if (is_vm_hugetlb_page(tmp))
56884 + reset_vma_resv_huge_pages(tmp);
56885 +
56886 + return tmp;
56887 +
56888 +fail_nomem_anon_vma_fork:
56889 + mpol_put(pol);
56890 +fail_nomem_policy:
56891 + kmem_cache_free(vm_area_cachep, tmp);
56892 +fail_nomem:
56893 + vm_unacct_memory(charge);
56894 + return NULL;
56895 +}
56896 +
56897 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56898 {
56899 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56900 struct rb_node **rb_link, *rb_parent;
56901 int retval;
56902 - unsigned long charge;
56903 - struct mempolicy *pol;
56904
56905 down_write(&oldmm->mmap_sem);
56906 flush_cache_dup_mm(oldmm);
56907 @@ -327,8 +392,8 @@ static int dup_mmap(struct mm_struct *mm
56908 mm->locked_vm = 0;
56909 mm->mmap = NULL;
56910 mm->mmap_cache = NULL;
56911 - mm->free_area_cache = oldmm->mmap_base;
56912 - mm->cached_hole_size = ~0UL;
56913 + mm->free_area_cache = oldmm->free_area_cache;
56914 + mm->cached_hole_size = oldmm->cached_hole_size;
56915 mm->map_count = 0;
56916 cpumask_clear(mm_cpumask(mm));
56917 mm->mm_rb = RB_ROOT;
56918 @@ -344,8 +409,6 @@ static int dup_mmap(struct mm_struct *mm
56919
56920 prev = NULL;
56921 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56922 - struct file *file;
56923 -
56924 if (mpnt->vm_flags & VM_DONTCOPY) {
56925 long pages = vma_pages(mpnt);
56926 mm->total_vm -= pages;
56927 @@ -353,56 +416,13 @@ static int dup_mmap(struct mm_struct *mm
56928 -pages);
56929 continue;
56930 }
56931 - charge = 0;
56932 - if (mpnt->vm_flags & VM_ACCOUNT) {
56933 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56934 - if (security_vm_enough_memory(len))
56935 - goto fail_nomem;
56936 - charge = len;
56937 - }
56938 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56939 - if (!tmp)
56940 - goto fail_nomem;
56941 - *tmp = *mpnt;
56942 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56943 - pol = mpol_dup(vma_policy(mpnt));
56944 - retval = PTR_ERR(pol);
56945 - if (IS_ERR(pol))
56946 - goto fail_nomem_policy;
56947 - vma_set_policy(tmp, pol);
56948 - tmp->vm_mm = mm;
56949 - if (anon_vma_fork(tmp, mpnt))
56950 - goto fail_nomem_anon_vma_fork;
56951 - tmp->vm_flags &= ~VM_LOCKED;
56952 - tmp->vm_next = tmp->vm_prev = NULL;
56953 - file = tmp->vm_file;
56954 - if (file) {
56955 - struct inode *inode = file->f_path.dentry->d_inode;
56956 - struct address_space *mapping = file->f_mapping;
56957 -
56958 - get_file(file);
56959 - if (tmp->vm_flags & VM_DENYWRITE)
56960 - atomic_dec(&inode->i_writecount);
56961 - spin_lock(&mapping->i_mmap_lock);
56962 - if (tmp->vm_flags & VM_SHARED)
56963 - mapping->i_mmap_writable++;
56964 - tmp->vm_truncate_count = mpnt->vm_truncate_count;
56965 - flush_dcache_mmap_lock(mapping);
56966 - /* insert tmp into the share list, just after mpnt */
56967 - vma_prio_tree_add(tmp, mpnt);
56968 - flush_dcache_mmap_unlock(mapping);
56969 - spin_unlock(&mapping->i_mmap_lock);
56970 + tmp = dup_vma(mm, mpnt);
56971 + if (!tmp) {
56972 + retval = -ENOMEM;
56973 + goto out;
56974 }
56975
56976 /*
56977 - * Clear hugetlb-related page reserves for children. This only
56978 - * affects MAP_PRIVATE mappings. Faults generated by the child
56979 - * are not guaranteed to succeed, even if read-only
56980 - */
56981 - if (is_vm_hugetlb_page(tmp))
56982 - reset_vma_resv_huge_pages(tmp);
56983 -
56984 - /*
56985 * Link in the new vma and copy the page table entries.
56986 */
56987 *pprev = tmp;
56988 @@ -423,6 +443,31 @@ static int dup_mmap(struct mm_struct *mm
56989 if (retval)
56990 goto out;
56991 }
56992 +
56993 +#ifdef CONFIG_PAX_SEGMEXEC
56994 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56995 + struct vm_area_struct *mpnt_m;
56996 +
56997 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56998 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56999 +
57000 + if (!mpnt->vm_mirror)
57001 + continue;
57002 +
57003 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
57004 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
57005 + mpnt->vm_mirror = mpnt_m;
57006 + } else {
57007 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
57008 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
57009 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
57010 + mpnt->vm_mirror->vm_mirror = mpnt;
57011 + }
57012 + }
57013 + BUG_ON(mpnt_m);
57014 + }
57015 +#endif
57016 +
57017 /* a new mm has just been created */
57018 arch_dup_mmap(oldmm, mm);
57019 retval = 0;
57020 @@ -431,14 +476,6 @@ out:
57021 flush_tlb_mm(oldmm);
57022 up_write(&oldmm->mmap_sem);
57023 return retval;
57024 -fail_nomem_anon_vma_fork:
57025 - mpol_put(pol);
57026 -fail_nomem_policy:
57027 - kmem_cache_free(vm_area_cachep, tmp);
57028 -fail_nomem:
57029 - retval = -ENOMEM;
57030 - vm_unacct_memory(charge);
57031 - goto out;
57032 }
57033
57034 static inline int mm_alloc_pgd(struct mm_struct * mm)
57035 @@ -785,13 +822,14 @@ static int copy_fs(unsigned long clone_f
57036 spin_unlock(&fs->lock);
57037 return -EAGAIN;
57038 }
57039 - fs->users++;
57040 + atomic_inc(&fs->users);
57041 spin_unlock(&fs->lock);
57042 return 0;
57043 }
57044 tsk->fs = copy_fs_struct(fs);
57045 if (!tsk->fs)
57046 return -ENOMEM;
57047 + gr_set_chroot_entries(tsk, &tsk->fs->root);
57048 return 0;
57049 }
57050
57051 @@ -1049,10 +1087,13 @@ static struct task_struct *copy_process(
57052 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
57053 #endif
57054 retval = -EAGAIN;
57055 +
57056 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
57057 +
57058 if (atomic_read(&p->real_cred->user->processes) >=
57059 task_rlimit(p, RLIMIT_NPROC)) {
57060 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
57061 - p->real_cred->user != INIT_USER)
57062 + if (p->real_cred->user != INIT_USER &&
57063 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
57064 goto bad_fork_free;
57065 }
57066
57067 @@ -1200,6 +1241,8 @@ static struct task_struct *copy_process(
57068 goto bad_fork_free_pid;
57069 }
57070
57071 + gr_copy_label(p);
57072 +
57073 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
57074 /*
57075 * Clear TID on mm_release()?
57076 @@ -1360,6 +1403,8 @@ bad_fork_cleanup_count:
57077 bad_fork_free:
57078 free_task(p);
57079 fork_out:
57080 + gr_log_forkfail(retval);
57081 +
57082 return ERR_PTR(retval);
57083 }
57084
57085 @@ -1448,6 +1493,8 @@ long do_fork(unsigned long clone_flags,
57086 if (clone_flags & CLONE_PARENT_SETTID)
57087 put_user(nr, parent_tidptr);
57088
57089 + gr_handle_brute_check();
57090 +
57091 if (clone_flags & CLONE_VFORK) {
57092 p->vfork_done = &vfork;
57093 init_completion(&vfork);
57094 @@ -1549,7 +1596,7 @@ static int unshare_fs(unsigned long unsh
57095 return 0;
57096
57097 /* don't need lock here; in the worst case we'll do useless copy */
57098 - if (fs->users == 1)
57099 + if (atomic_read(&fs->users) == 1)
57100 return 0;
57101
57102 *new_fsp = copy_fs_struct(fs);
57103 @@ -1636,7 +1683,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
57104 fs = current->fs;
57105 spin_lock(&fs->lock);
57106 current->fs = new_fs;
57107 - if (--fs->users)
57108 + gr_set_chroot_entries(current, &current->fs->root);
57109 + if (atomic_dec_return(&fs->users))
57110 new_fs = NULL;
57111 else
57112 new_fs = fs;
57113 diff -urNp linux-2.6.39.4/kernel/futex.c linux-2.6.39.4/kernel/futex.c
57114 --- linux-2.6.39.4/kernel/futex.c 2011-05-19 00:06:34.000000000 -0400
57115 +++ linux-2.6.39.4/kernel/futex.c 2011-08-05 19:44:37.000000000 -0400
57116 @@ -54,6 +54,7 @@
57117 #include <linux/mount.h>
57118 #include <linux/pagemap.h>
57119 #include <linux/syscalls.h>
57120 +#include <linux/ptrace.h>
57121 #include <linux/signal.h>
57122 #include <linux/module.h>
57123 #include <linux/magic.h>
57124 @@ -236,6 +237,11 @@ get_futex_key(u32 __user *uaddr, int fsh
57125 struct page *page, *page_head;
57126 int err;
57127
57128 +#ifdef CONFIG_PAX_SEGMEXEC
57129 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
57130 + return -EFAULT;
57131 +#endif
57132 +
57133 /*
57134 * The futex address must be "naturally" aligned.
57135 */
57136 @@ -1833,6 +1839,8 @@ static int futex_wait(u32 __user *uaddr,
57137 struct futex_q q = futex_q_init;
57138 int ret;
57139
57140 + pax_track_stack();
57141 +
57142 if (!bitset)
57143 return -EINVAL;
57144 q.bitset = bitset;
57145 @@ -2229,6 +2237,8 @@ static int futex_wait_requeue_pi(u32 __u
57146 struct futex_q q = futex_q_init;
57147 int res, ret;
57148
57149 + pax_track_stack();
57150 +
57151 if (!bitset)
57152 return -EINVAL;
57153
57154 @@ -2401,7 +2411,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57155 {
57156 struct robust_list_head __user *head;
57157 unsigned long ret;
57158 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57159 const struct cred *cred = current_cred(), *pcred;
57160 +#endif
57161
57162 if (!futex_cmpxchg_enabled)
57163 return -ENOSYS;
57164 @@ -2417,6 +2429,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57165 if (!p)
57166 goto err_unlock;
57167 ret = -EPERM;
57168 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57169 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57170 + goto err_unlock;
57171 +#else
57172 pcred = __task_cred(p);
57173 /* If victim is in different user_ns, then uids are not
57174 comparable, so we must have CAP_SYS_PTRACE */
57175 @@ -2431,6 +2447,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57176 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57177 goto err_unlock;
57178 ok:
57179 +#endif
57180 head = p->robust_list;
57181 rcu_read_unlock();
57182 }
57183 @@ -2682,6 +2699,7 @@ static int __init futex_init(void)
57184 {
57185 u32 curval;
57186 int i;
57187 + mm_segment_t oldfs;
57188
57189 /*
57190 * This will fail and we want it. Some arch implementations do
57191 @@ -2693,8 +2711,11 @@ static int __init futex_init(void)
57192 * implementation, the non-functional ones will return
57193 * -ENOSYS.
57194 */
57195 + oldfs = get_fs();
57196 + set_fs(USER_DS);
57197 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57198 futex_cmpxchg_enabled = 1;
57199 + set_fs(oldfs);
57200
57201 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57202 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57203 diff -urNp linux-2.6.39.4/kernel/futex_compat.c linux-2.6.39.4/kernel/futex_compat.c
57204 --- linux-2.6.39.4/kernel/futex_compat.c 2011-05-19 00:06:34.000000000 -0400
57205 +++ linux-2.6.39.4/kernel/futex_compat.c 2011-08-05 19:44:37.000000000 -0400
57206 @@ -10,6 +10,7 @@
57207 #include <linux/compat.h>
57208 #include <linux/nsproxy.h>
57209 #include <linux/futex.h>
57210 +#include <linux/ptrace.h>
57211
57212 #include <asm/uaccess.h>
57213
57214 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57215 {
57216 struct compat_robust_list_head __user *head;
57217 unsigned long ret;
57218 - const struct cred *cred = current_cred(), *pcred;
57219 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57220 + const struct cred *cred = current_cred();
57221 + const struct cred *pcred;
57222 +#endif
57223
57224 if (!futex_cmpxchg_enabled)
57225 return -ENOSYS;
57226 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57227 if (!p)
57228 goto err_unlock;
57229 ret = -EPERM;
57230 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57231 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57232 + goto err_unlock;
57233 +#else
57234 pcred = __task_cred(p);
57235 /* If victim is in different user_ns, then uids are not
57236 comparable, so we must have CAP_SYS_PTRACE */
57237 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57238 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57239 goto err_unlock;
57240 ok:
57241 +#endif
57242 head = p->compat_robust_list;
57243 rcu_read_unlock();
57244 }
57245 diff -urNp linux-2.6.39.4/kernel/gcov/base.c linux-2.6.39.4/kernel/gcov/base.c
57246 --- linux-2.6.39.4/kernel/gcov/base.c 2011-05-19 00:06:34.000000000 -0400
57247 +++ linux-2.6.39.4/kernel/gcov/base.c 2011-08-05 19:44:37.000000000 -0400
57248 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
57249 }
57250
57251 #ifdef CONFIG_MODULES
57252 -static inline int within(void *addr, void *start, unsigned long size)
57253 -{
57254 - return ((addr >= start) && (addr < start + size));
57255 -}
57256 -
57257 /* Update list and generate events when modules are unloaded. */
57258 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57259 void *data)
57260 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57261 prev = NULL;
57262 /* Remove entries located in module from linked list. */
57263 for (info = gcov_info_head; info; info = info->next) {
57264 - if (within(info, mod->module_core, mod->core_size)) {
57265 + if (within_module_core_rw((unsigned long)info, mod)) {
57266 if (prev)
57267 prev->next = info->next;
57268 else
57269 diff -urNp linux-2.6.39.4/kernel/hrtimer.c linux-2.6.39.4/kernel/hrtimer.c
57270 --- linux-2.6.39.4/kernel/hrtimer.c 2011-05-19 00:06:34.000000000 -0400
57271 +++ linux-2.6.39.4/kernel/hrtimer.c 2011-08-05 19:44:37.000000000 -0400
57272 @@ -1383,7 +1383,7 @@ void hrtimer_peek_ahead_timers(void)
57273 local_irq_restore(flags);
57274 }
57275
57276 -static void run_hrtimer_softirq(struct softirq_action *h)
57277 +static void run_hrtimer_softirq(void)
57278 {
57279 hrtimer_peek_ahead_timers();
57280 }
57281 diff -urNp linux-2.6.39.4/kernel/irq/manage.c linux-2.6.39.4/kernel/irq/manage.c
57282 --- linux-2.6.39.4/kernel/irq/manage.c 2011-05-19 00:06:34.000000000 -0400
57283 +++ linux-2.6.39.4/kernel/irq/manage.c 2011-08-05 19:44:37.000000000 -0400
57284 @@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, u
57285 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
57286 int ret = 0;
57287
57288 + if (!desc)
57289 + return -EINVAL;
57290 +
57291 /* wakeup-capable irqs can be shared between drivers that
57292 * don't need to have the same sleep mode behaviors.
57293 */
57294 diff -urNp linux-2.6.39.4/kernel/jump_label.c linux-2.6.39.4/kernel/jump_label.c
57295 --- linux-2.6.39.4/kernel/jump_label.c 2011-05-19 00:06:34.000000000 -0400
57296 +++ linux-2.6.39.4/kernel/jump_label.c 2011-08-05 19:44:37.000000000 -0400
57297 @@ -49,6 +49,17 @@ void jump_label_unlock(void)
57298 mutex_unlock(&jump_label_mutex);
57299 }
57300
57301 +static void jump_label_swap(void *a, void *b, int size)
57302 +{
57303 + struct jump_entry t;
57304 +
57305 + t = *(struct jump_entry *)a;
57306 + pax_open_kernel();
57307 + *(struct jump_entry *)a = *(struct jump_entry *)b;
57308 + *(struct jump_entry *)b = t;
57309 + pax_close_kernel();
57310 +}
57311 +
57312 static int jump_label_cmp(const void *a, const void *b)
57313 {
57314 const struct jump_entry *jea = a;
57315 @@ -70,7 +81,7 @@ sort_jump_label_entries(struct jump_entr
57316
57317 size = (((unsigned long)stop - (unsigned long)start)
57318 / sizeof(struct jump_entry));
57319 - sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57320 + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, jump_label_swap);
57321 }
57322
57323 static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
57324 @@ -407,8 +418,11 @@ static void remove_jump_label_module_ini
57325 count = e_module->nr_entries;
57326 iter = e_module->table;
57327 while (count--) {
57328 - if (within_module_init(iter->code, mod))
57329 + if (within_module_init(iter->code, mod)) {
57330 + pax_open_kernel();
57331 iter->key = 0;
57332 + pax_close_kernel();
57333 + }
57334 iter++;
57335 }
57336 }
57337 diff -urNp linux-2.6.39.4/kernel/kallsyms.c linux-2.6.39.4/kernel/kallsyms.c
57338 --- linux-2.6.39.4/kernel/kallsyms.c 2011-05-19 00:06:34.000000000 -0400
57339 +++ linux-2.6.39.4/kernel/kallsyms.c 2011-08-05 19:44:37.000000000 -0400
57340 @@ -11,6 +11,9 @@
57341 * Changed the compression method from stem compression to "table lookup"
57342 * compression (see scripts/kallsyms.c for a more complete description)
57343 */
57344 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57345 +#define __INCLUDED_BY_HIDESYM 1
57346 +#endif
57347 #include <linux/kallsyms.h>
57348 #include <linux/module.h>
57349 #include <linux/init.h>
57350 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57351
57352 static inline int is_kernel_inittext(unsigned long addr)
57353 {
57354 + if (system_state != SYSTEM_BOOTING)
57355 + return 0;
57356 +
57357 if (addr >= (unsigned long)_sinittext
57358 && addr <= (unsigned long)_einittext)
57359 return 1;
57360 return 0;
57361 }
57362
57363 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57364 +#ifdef CONFIG_MODULES
57365 +static inline int is_module_text(unsigned long addr)
57366 +{
57367 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57368 + return 1;
57369 +
57370 + addr = ktla_ktva(addr);
57371 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57372 +}
57373 +#else
57374 +static inline int is_module_text(unsigned long addr)
57375 +{
57376 + return 0;
57377 +}
57378 +#endif
57379 +#endif
57380 +
57381 static inline int is_kernel_text(unsigned long addr)
57382 {
57383 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57384 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57385
57386 static inline int is_kernel(unsigned long addr)
57387 {
57388 +
57389 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57390 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
57391 + return 1;
57392 +
57393 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57394 +#else
57395 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57396 +#endif
57397 +
57398 return 1;
57399 return in_gate_area_no_mm(addr);
57400 }
57401
57402 static int is_ksym_addr(unsigned long addr)
57403 {
57404 +
57405 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57406 + if (is_module_text(addr))
57407 + return 0;
57408 +#endif
57409 +
57410 if (all_var)
57411 return is_kernel(addr);
57412
57413 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57414
57415 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57416 {
57417 - iter->name[0] = '\0';
57418 iter->nameoff = get_symbol_offset(new_pos);
57419 iter->pos = new_pos;
57420 }
57421 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57422 {
57423 struct kallsym_iter *iter = m->private;
57424
57425 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57426 + if (current_uid())
57427 + return 0;
57428 +#endif
57429 +
57430 /* Some debugging symbols have no name. Ignore them. */
57431 if (!iter->name[0])
57432 return 0;
57433 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57434 struct kallsym_iter *iter;
57435 int ret;
57436
57437 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57438 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57439 if (!iter)
57440 return -ENOMEM;
57441 reset_iter(iter, 0);
57442 diff -urNp linux-2.6.39.4/kernel/kmod.c linux-2.6.39.4/kernel/kmod.c
57443 --- linux-2.6.39.4/kernel/kmod.c 2011-05-19 00:06:34.000000000 -0400
57444 +++ linux-2.6.39.4/kernel/kmod.c 2011-08-05 19:44:37.000000000 -0400
57445 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57446 * If module auto-loading support is disabled then this function
57447 * becomes a no-operation.
57448 */
57449 -int __request_module(bool wait, const char *fmt, ...)
57450 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57451 {
57452 - va_list args;
57453 char module_name[MODULE_NAME_LEN];
57454 unsigned int max_modprobes;
57455 int ret;
57456 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57457 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57458 static char *envp[] = { "HOME=/",
57459 "TERM=linux",
57460 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57461 @@ -80,9 +79,7 @@ int __request_module(bool wait, const ch
57462 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57463 static int kmod_loop_msg;
57464
57465 - va_start(args, fmt);
57466 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57467 - va_end(args);
57468 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57469 if (ret >= MODULE_NAME_LEN)
57470 return -ENAMETOOLONG;
57471
57472 @@ -90,6 +87,20 @@ int __request_module(bool wait, const ch
57473 if (ret)
57474 return ret;
57475
57476 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57477 + if (!current_uid()) {
57478 + /* hack to workaround consolekit/udisks stupidity */
57479 + read_lock(&tasklist_lock);
57480 + if (!strcmp(current->comm, "mount") &&
57481 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57482 + read_unlock(&tasklist_lock);
57483 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57484 + return -EPERM;
57485 + }
57486 + read_unlock(&tasklist_lock);
57487 + }
57488 +#endif
57489 +
57490 /* If modprobe needs a service that is in a module, we get a recursive
57491 * loop. Limit the number of running kmod threads to max_threads/2 or
57492 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57493 @@ -123,6 +134,47 @@ int __request_module(bool wait, const ch
57494 atomic_dec(&kmod_concurrent);
57495 return ret;
57496 }
57497 +
57498 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57499 +{
57500 + va_list args;
57501 + int ret;
57502 +
57503 + va_start(args, fmt);
57504 + ret = ____request_module(wait, module_param, fmt, args);
57505 + va_end(args);
57506 +
57507 + return ret;
57508 +}
57509 +
57510 +int __request_module(bool wait, const char *fmt, ...)
57511 +{
57512 + va_list args;
57513 + int ret;
57514 +
57515 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57516 + if (current_uid()) {
57517 + char module_param[MODULE_NAME_LEN];
57518 +
57519 + memset(module_param, 0, sizeof(module_param));
57520 +
57521 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57522 +
57523 + va_start(args, fmt);
57524 + ret = ____request_module(wait, module_param, fmt, args);
57525 + va_end(args);
57526 +
57527 + return ret;
57528 + }
57529 +#endif
57530 +
57531 + va_start(args, fmt);
57532 + ret = ____request_module(wait, NULL, fmt, args);
57533 + va_end(args);
57534 +
57535 + return ret;
57536 +}
57537 +
57538 EXPORT_SYMBOL(__request_module);
57539 #endif /* CONFIG_MODULES */
57540
57541 diff -urNp linux-2.6.39.4/kernel/kprobes.c linux-2.6.39.4/kernel/kprobes.c
57542 --- linux-2.6.39.4/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
57543 +++ linux-2.6.39.4/kernel/kprobes.c 2011-08-05 19:44:37.000000000 -0400
57544 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57545 * kernel image and loaded module images reside. This is required
57546 * so x86_64 can correctly handle the %rip-relative fixups.
57547 */
57548 - kip->insns = module_alloc(PAGE_SIZE);
57549 + kip->insns = module_alloc_exec(PAGE_SIZE);
57550 if (!kip->insns) {
57551 kfree(kip);
57552 return NULL;
57553 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57554 */
57555 if (!list_is_singular(&kip->list)) {
57556 list_del(&kip->list);
57557 - module_free(NULL, kip->insns);
57558 + module_free_exec(NULL, kip->insns);
57559 kfree(kip);
57560 }
57561 return 1;
57562 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57563 {
57564 int i, err = 0;
57565 unsigned long offset = 0, size = 0;
57566 - char *modname, namebuf[128];
57567 + char *modname, namebuf[KSYM_NAME_LEN];
57568 const char *symbol_name;
57569 void *addr;
57570 struct kprobe_blackpoint *kb;
57571 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57572 const char *sym = NULL;
57573 unsigned int i = *(loff_t *) v;
57574 unsigned long offset = 0;
57575 - char *modname, namebuf[128];
57576 + char *modname, namebuf[KSYM_NAME_LEN];
57577
57578 head = &kprobe_table[i];
57579 preempt_disable();
57580 diff -urNp linux-2.6.39.4/kernel/lockdep.c linux-2.6.39.4/kernel/lockdep.c
57581 --- linux-2.6.39.4/kernel/lockdep.c 2011-06-25 12:55:23.000000000 -0400
57582 +++ linux-2.6.39.4/kernel/lockdep.c 2011-08-05 19:44:37.000000000 -0400
57583 @@ -571,6 +571,10 @@ static int static_obj(void *obj)
57584 end = (unsigned long) &_end,
57585 addr = (unsigned long) obj;
57586
57587 +#ifdef CONFIG_PAX_KERNEXEC
57588 + start = ktla_ktva(start);
57589 +#endif
57590 +
57591 /*
57592 * static variable?
57593 */
57594 @@ -706,6 +710,7 @@ register_lock_class(struct lockdep_map *
57595 if (!static_obj(lock->key)) {
57596 debug_locks_off();
57597 printk("INFO: trying to register non-static key.\n");
57598 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57599 printk("the code is fine but needs lockdep annotation.\n");
57600 printk("turning off the locking correctness validator.\n");
57601 dump_stack();
57602 @@ -2752,7 +2757,7 @@ static int __lock_acquire(struct lockdep
57603 if (!class)
57604 return 0;
57605 }
57606 - atomic_inc((atomic_t *)&class->ops);
57607 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57608 if (very_verbose(class)) {
57609 printk("\nacquire class [%p] %s", class->key, class->name);
57610 if (class->name_version > 1)
57611 diff -urNp linux-2.6.39.4/kernel/lockdep_proc.c linux-2.6.39.4/kernel/lockdep_proc.c
57612 --- linux-2.6.39.4/kernel/lockdep_proc.c 2011-05-19 00:06:34.000000000 -0400
57613 +++ linux-2.6.39.4/kernel/lockdep_proc.c 2011-08-05 19:44:37.000000000 -0400
57614 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57615
57616 static void print_name(struct seq_file *m, struct lock_class *class)
57617 {
57618 - char str[128];
57619 + char str[KSYM_NAME_LEN];
57620 const char *name = class->name;
57621
57622 if (!name) {
57623 diff -urNp linux-2.6.39.4/kernel/module.c linux-2.6.39.4/kernel/module.c
57624 --- linux-2.6.39.4/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
57625 +++ linux-2.6.39.4/kernel/module.c 2011-08-05 19:44:37.000000000 -0400
57626 @@ -57,6 +57,7 @@
57627 #include <linux/kmemleak.h>
57628 #include <linux/jump_label.h>
57629 #include <linux/pfn.h>
57630 +#include <linux/grsecurity.h>
57631
57632 #define CREATE_TRACE_POINTS
57633 #include <trace/events/module.h>
57634 @@ -118,7 +119,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57635
57636 /* Bounds of module allocation, for speeding __module_address.
57637 * Protected by module_mutex. */
57638 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57639 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57640 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57641
57642 int register_module_notifier(struct notifier_block * nb)
57643 {
57644 @@ -282,7 +284,7 @@ bool each_symbol(bool (*fn)(const struct
57645 return true;
57646
57647 list_for_each_entry_rcu(mod, &modules, list) {
57648 - struct symsearch arr[] = {
57649 + struct symsearch modarr[] = {
57650 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57651 NOT_GPL_ONLY, false },
57652 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57653 @@ -304,7 +306,7 @@ bool each_symbol(bool (*fn)(const struct
57654 #endif
57655 };
57656
57657 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57658 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57659 return true;
57660 }
57661 return false;
57662 @@ -415,7 +417,7 @@ static inline void __percpu *mod_percpu(
57663 static int percpu_modalloc(struct module *mod,
57664 unsigned long size, unsigned long align)
57665 {
57666 - if (align > PAGE_SIZE) {
57667 + if (align-1 >= PAGE_SIZE) {
57668 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57669 mod->name, align, PAGE_SIZE);
57670 align = PAGE_SIZE;
57671 @@ -1143,7 +1145,7 @@ resolve_symbol_wait(struct module *mod,
57672 */
57673 #ifdef CONFIG_SYSFS
57674
57675 -#ifdef CONFIG_KALLSYMS
57676 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57677 static inline bool sect_empty(const Elf_Shdr *sect)
57678 {
57679 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57680 @@ -1612,17 +1614,17 @@ void unset_section_ro_nx(struct module *
57681 {
57682 unsigned long total_pages;
57683
57684 - if (mod->module_core == module_region) {
57685 + if (mod->module_core_rx == module_region) {
57686 /* Set core as NX+RW */
57687 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
57688 - set_memory_nx((unsigned long)mod->module_core, total_pages);
57689 - set_memory_rw((unsigned long)mod->module_core, total_pages);
57690 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_core_rx, mod->core_size_rx);
57691 + set_memory_nx((unsigned long)mod->module_core_rx, total_pages);
57692 + set_memory_rw((unsigned long)mod->module_core_rx, total_pages);
57693
57694 - } else if (mod->module_init == module_region) {
57695 + } else if (mod->module_init_rx == module_region) {
57696 /* Set init as NX+RW */
57697 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
57698 - set_memory_nx((unsigned long)mod->module_init, total_pages);
57699 - set_memory_rw((unsigned long)mod->module_init, total_pages);
57700 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_init_rx, mod->init_size_rx);
57701 + set_memory_nx((unsigned long)mod->module_init_rx, total_pages);
57702 + set_memory_rw((unsigned long)mod->module_init_rx, total_pages);
57703 }
57704 }
57705
57706 @@ -1633,14 +1635,14 @@ void set_all_modules_text_rw()
57707
57708 mutex_lock(&module_mutex);
57709 list_for_each_entry_rcu(mod, &modules, list) {
57710 - if ((mod->module_core) && (mod->core_text_size)) {
57711 - set_page_attributes(mod->module_core,
57712 - mod->module_core + mod->core_text_size,
57713 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57714 + set_page_attributes(mod->module_core_rx,
57715 + mod->module_core_rx + mod->core_size_rx,
57716 set_memory_rw);
57717 }
57718 - if ((mod->module_init) && (mod->init_text_size)) {
57719 - set_page_attributes(mod->module_init,
57720 - mod->module_init + mod->init_text_size,
57721 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57722 + set_page_attributes(mod->module_init_rx,
57723 + mod->module_init_rx + mod->init_size_rx,
57724 set_memory_rw);
57725 }
57726 }
57727 @@ -1654,14 +1656,14 @@ void set_all_modules_text_ro()
57728
57729 mutex_lock(&module_mutex);
57730 list_for_each_entry_rcu(mod, &modules, list) {
57731 - if ((mod->module_core) && (mod->core_text_size)) {
57732 - set_page_attributes(mod->module_core,
57733 - mod->module_core + mod->core_text_size,
57734 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57735 + set_page_attributes(mod->module_core_rx,
57736 + mod->module_core_rx + mod->core_size_rx,
57737 set_memory_ro);
57738 }
57739 - if ((mod->module_init) && (mod->init_text_size)) {
57740 - set_page_attributes(mod->module_init,
57741 - mod->module_init + mod->init_text_size,
57742 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57743 + set_page_attributes(mod->module_init_rx,
57744 + mod->module_init_rx + mod->init_size_rx,
57745 set_memory_ro);
57746 }
57747 }
57748 @@ -1696,17 +1698,20 @@ static void free_module(struct module *m
57749 destroy_params(mod->kp, mod->num_kp);
57750
57751 /* This may be NULL, but that's OK */
57752 - unset_section_ro_nx(mod, mod->module_init);
57753 - module_free(mod, mod->module_init);
57754 + unset_section_ro_nx(mod, mod->module_init_rx);
57755 + module_free(mod, mod->module_init_rw);
57756 + module_free_exec(mod, mod->module_init_rx);
57757 kfree(mod->args);
57758 percpu_modfree(mod);
57759
57760 /* Free lock-classes: */
57761 - lockdep_free_key_range(mod->module_core, mod->core_size);
57762 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57763 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57764
57765 /* Finally, free the core (containing the module structure) */
57766 - unset_section_ro_nx(mod, mod->module_core);
57767 - module_free(mod, mod->module_core);
57768 + unset_section_ro_nx(mod, mod->module_core_rx);
57769 + module_free_exec(mod, mod->module_core_rx);
57770 + module_free(mod, mod->module_core_rw);
57771
57772 #ifdef CONFIG_MPU
57773 update_protections(current->mm);
57774 @@ -1775,10 +1780,31 @@ static int simplify_symbols(struct modul
57775 unsigned int i;
57776 int ret = 0;
57777 const struct kernel_symbol *ksym;
57778 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57779 + int is_fs_load = 0;
57780 + int register_filesystem_found = 0;
57781 + char *p;
57782 +
57783 + p = strstr(mod->args, "grsec_modharden_fs");
57784 + if (p) {
57785 + char *endptr = p + strlen("grsec_modharden_fs");
57786 + /* copy \0 as well */
57787 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57788 + is_fs_load = 1;
57789 + }
57790 +#endif
57791
57792 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57793 const char *name = info->strtab + sym[i].st_name;
57794
57795 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57796 + /* it's a real shame this will never get ripped and copied
57797 + upstream! ;(
57798 + */
57799 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57800 + register_filesystem_found = 1;
57801 +#endif
57802 +
57803 switch (sym[i].st_shndx) {
57804 case SHN_COMMON:
57805 /* We compiled with -fno-common. These are not
57806 @@ -1799,7 +1825,9 @@ static int simplify_symbols(struct modul
57807 ksym = resolve_symbol_wait(mod, info, name);
57808 /* Ok if resolved. */
57809 if (ksym && !IS_ERR(ksym)) {
57810 + pax_open_kernel();
57811 sym[i].st_value = ksym->value;
57812 + pax_close_kernel();
57813 break;
57814 }
57815
57816 @@ -1818,11 +1846,20 @@ static int simplify_symbols(struct modul
57817 secbase = (unsigned long)mod_percpu(mod);
57818 else
57819 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57820 + pax_open_kernel();
57821 sym[i].st_value += secbase;
57822 + pax_close_kernel();
57823 break;
57824 }
57825 }
57826
57827 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57828 + if (is_fs_load && !register_filesystem_found) {
57829 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57830 + ret = -EPERM;
57831 + }
57832 +#endif
57833 +
57834 return ret;
57835 }
57836
57837 @@ -1906,22 +1943,12 @@ static void layout_sections(struct modul
57838 || s->sh_entsize != ~0UL
57839 || strstarts(sname, ".init"))
57840 continue;
57841 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57842 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57843 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57844 + else
57845 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57846 DEBUGP("\t%s\n", name);
57847 }
57848 - switch (m) {
57849 - case 0: /* executable */
57850 - mod->core_size = debug_align(mod->core_size);
57851 - mod->core_text_size = mod->core_size;
57852 - break;
57853 - case 1: /* RO: text and ro-data */
57854 - mod->core_size = debug_align(mod->core_size);
57855 - mod->core_ro_size = mod->core_size;
57856 - break;
57857 - case 3: /* whole core */
57858 - mod->core_size = debug_align(mod->core_size);
57859 - break;
57860 - }
57861 }
57862
57863 DEBUGP("Init section allocation order:\n");
57864 @@ -1935,23 +1962,13 @@ static void layout_sections(struct modul
57865 || s->sh_entsize != ~0UL
57866 || !strstarts(sname, ".init"))
57867 continue;
57868 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57869 - | INIT_OFFSET_MASK);
57870 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57871 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57872 + else
57873 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57874 + s->sh_entsize |= INIT_OFFSET_MASK;
57875 DEBUGP("\t%s\n", sname);
57876 }
57877 - switch (m) {
57878 - case 0: /* executable */
57879 - mod->init_size = debug_align(mod->init_size);
57880 - mod->init_text_size = mod->init_size;
57881 - break;
57882 - case 1: /* RO: text and ro-data */
57883 - mod->init_size = debug_align(mod->init_size);
57884 - mod->init_ro_size = mod->init_size;
57885 - break;
57886 - case 3: /* whole init */
57887 - mod->init_size = debug_align(mod->init_size);
57888 - break;
57889 - }
57890 }
57891 }
57892
57893 @@ -2119,7 +2136,7 @@ static void layout_symtab(struct module
57894
57895 /* Put symbol section at end of init part of module. */
57896 symsect->sh_flags |= SHF_ALLOC;
57897 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57898 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57899 info->index.sym) | INIT_OFFSET_MASK;
57900 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57901
57902 @@ -2136,19 +2153,19 @@ static void layout_symtab(struct module
57903 }
57904
57905 /* Append room for core symbols at end of core part. */
57906 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57907 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57908 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57909 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57910
57911 /* Put string table section at end of init part of module. */
57912 strsect->sh_flags |= SHF_ALLOC;
57913 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57914 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57915 info->index.str) | INIT_OFFSET_MASK;
57916 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57917
57918 /* Append room for core symbols' strings at end of core part. */
57919 - info->stroffs = mod->core_size;
57920 + info->stroffs = mod->core_size_rx;
57921 __set_bit(0, info->strmap);
57922 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57923 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57924 }
57925
57926 static void add_kallsyms(struct module *mod, const struct load_info *info)
57927 @@ -2164,11 +2181,13 @@ static void add_kallsyms(struct module *
57928 /* Make sure we get permanent strtab: don't use info->strtab. */
57929 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57930
57931 + pax_open_kernel();
57932 +
57933 /* Set types up while we still have access to sections. */
57934 for (i = 0; i < mod->num_symtab; i++)
57935 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57936
57937 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57938 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57939 src = mod->symtab;
57940 *dst = *src;
57941 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57942 @@ -2181,10 +2200,12 @@ static void add_kallsyms(struct module *
57943 }
57944 mod->core_num_syms = ndst;
57945
57946 - mod->core_strtab = s = mod->module_core + info->stroffs;
57947 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57948 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57949 if (test_bit(i, info->strmap))
57950 *++s = mod->strtab[i];
57951 +
57952 + pax_close_kernel();
57953 }
57954 #else
57955 static inline void layout_symtab(struct module *mod, struct load_info *info)
57956 @@ -2213,17 +2234,33 @@ static void dynamic_debug_remove(struct
57957 ddebug_remove_module(debug->modname);
57958 }
57959
57960 -static void *module_alloc_update_bounds(unsigned long size)
57961 +static void *module_alloc_update_bounds_rw(unsigned long size)
57962 {
57963 void *ret = module_alloc(size);
57964
57965 if (ret) {
57966 mutex_lock(&module_mutex);
57967 /* Update module bounds. */
57968 - if ((unsigned long)ret < module_addr_min)
57969 - module_addr_min = (unsigned long)ret;
57970 - if ((unsigned long)ret + size > module_addr_max)
57971 - module_addr_max = (unsigned long)ret + size;
57972 + if ((unsigned long)ret < module_addr_min_rw)
57973 + module_addr_min_rw = (unsigned long)ret;
57974 + if ((unsigned long)ret + size > module_addr_max_rw)
57975 + module_addr_max_rw = (unsigned long)ret + size;
57976 + mutex_unlock(&module_mutex);
57977 + }
57978 + return ret;
57979 +}
57980 +
57981 +static void *module_alloc_update_bounds_rx(unsigned long size)
57982 +{
57983 + void *ret = module_alloc_exec(size);
57984 +
57985 + if (ret) {
57986 + mutex_lock(&module_mutex);
57987 + /* Update module bounds. */
57988 + if ((unsigned long)ret < module_addr_min_rx)
57989 + module_addr_min_rx = (unsigned long)ret;
57990 + if ((unsigned long)ret + size > module_addr_max_rx)
57991 + module_addr_max_rx = (unsigned long)ret + size;
57992 mutex_unlock(&module_mutex);
57993 }
57994 return ret;
57995 @@ -2516,7 +2553,7 @@ static int move_module(struct module *mo
57996 void *ptr;
57997
57998 /* Do the allocs. */
57999 - ptr = module_alloc_update_bounds(mod->core_size);
58000 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
58001 /*
58002 * The pointer to this block is stored in the module structure
58003 * which is inside the block. Just mark it as not being a
58004 @@ -2526,23 +2563,50 @@ static int move_module(struct module *mo
58005 if (!ptr)
58006 return -ENOMEM;
58007
58008 - memset(ptr, 0, mod->core_size);
58009 - mod->module_core = ptr;
58010 + memset(ptr, 0, mod->core_size_rw);
58011 + mod->module_core_rw = ptr;
58012
58013 - ptr = module_alloc_update_bounds(mod->init_size);
58014 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
58015 /*
58016 * The pointer to this block is stored in the module structure
58017 * which is inside the block. This block doesn't need to be
58018 * scanned as it contains data and code that will be freed
58019 * after the module is initialized.
58020 */
58021 - kmemleak_ignore(ptr);
58022 - if (!ptr && mod->init_size) {
58023 - module_free(mod, mod->module_core);
58024 + kmemleak_not_leak(ptr);
58025 + if (!ptr && mod->init_size_rw) {
58026 + module_free(mod, mod->module_core_rw);
58027 return -ENOMEM;
58028 }
58029 - memset(ptr, 0, mod->init_size);
58030 - mod->module_init = ptr;
58031 + memset(ptr, 0, mod->init_size_rw);
58032 + mod->module_init_rw = ptr;
58033 +
58034 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
58035 + kmemleak_not_leak(ptr);
58036 + if (!ptr) {
58037 + module_free(mod, mod->module_init_rw);
58038 + module_free(mod, mod->module_core_rw);
58039 + return -ENOMEM;
58040 + }
58041 +
58042 + pax_open_kernel();
58043 + memset(ptr, 0, mod->core_size_rx);
58044 + pax_close_kernel();
58045 + mod->module_core_rx = ptr;
58046 +
58047 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
58048 + kmemleak_not_leak(ptr);
58049 + if (!ptr && mod->init_size_rx) {
58050 + module_free_exec(mod, mod->module_core_rx);
58051 + module_free(mod, mod->module_init_rw);
58052 + module_free(mod, mod->module_core_rw);
58053 + return -ENOMEM;
58054 + }
58055 +
58056 + pax_open_kernel();
58057 + memset(ptr, 0, mod->init_size_rx);
58058 + pax_close_kernel();
58059 + mod->module_init_rx = ptr;
58060
58061 /* Transfer each section which specifies SHF_ALLOC */
58062 DEBUGP("final section addresses:\n");
58063 @@ -2553,16 +2617,45 @@ static int move_module(struct module *mo
58064 if (!(shdr->sh_flags & SHF_ALLOC))
58065 continue;
58066
58067 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
58068 - dest = mod->module_init
58069 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58070 - else
58071 - dest = mod->module_core + shdr->sh_entsize;
58072 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
58073 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58074 + dest = mod->module_init_rw
58075 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58076 + else
58077 + dest = mod->module_init_rx
58078 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58079 + } else {
58080 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58081 + dest = mod->module_core_rw + shdr->sh_entsize;
58082 + else
58083 + dest = mod->module_core_rx + shdr->sh_entsize;
58084 + }
58085 +
58086 + if (shdr->sh_type != SHT_NOBITS) {
58087 +
58088 +#ifdef CONFIG_PAX_KERNEXEC
58089 +#ifdef CONFIG_X86_64
58090 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
58091 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
58092 +#endif
58093 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
58094 + pax_open_kernel();
58095 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58096 + pax_close_kernel();
58097 + } else
58098 +#endif
58099
58100 - if (shdr->sh_type != SHT_NOBITS)
58101 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58102 + }
58103 /* Update sh_addr to point to copy in image. */
58104 - shdr->sh_addr = (unsigned long)dest;
58105 +
58106 +#ifdef CONFIG_PAX_KERNEXEC
58107 + if (shdr->sh_flags & SHF_EXECINSTR)
58108 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
58109 + else
58110 +#endif
58111 +
58112 + shdr->sh_addr = (unsigned long)dest;
58113 DEBUGP("\t0x%lx %s\n",
58114 shdr->sh_addr, info->secstrings + shdr->sh_name);
58115 }
58116 @@ -2613,12 +2706,12 @@ static void flush_module_icache(const st
58117 * Do it before processing of module parameters, so the module
58118 * can provide parameter accessor functions of its own.
58119 */
58120 - if (mod->module_init)
58121 - flush_icache_range((unsigned long)mod->module_init,
58122 - (unsigned long)mod->module_init
58123 - + mod->init_size);
58124 - flush_icache_range((unsigned long)mod->module_core,
58125 - (unsigned long)mod->module_core + mod->core_size);
58126 + if (mod->module_init_rx)
58127 + flush_icache_range((unsigned long)mod->module_init_rx,
58128 + (unsigned long)mod->module_init_rx
58129 + + mod->init_size_rx);
58130 + flush_icache_range((unsigned long)mod->module_core_rx,
58131 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
58132
58133 set_fs(old_fs);
58134 }
58135 @@ -2690,8 +2783,10 @@ static void module_deallocate(struct mod
58136 {
58137 kfree(info->strmap);
58138 percpu_modfree(mod);
58139 - module_free(mod, mod->module_init);
58140 - module_free(mod, mod->module_core);
58141 + module_free_exec(mod, mod->module_init_rx);
58142 + module_free_exec(mod, mod->module_core_rx);
58143 + module_free(mod, mod->module_init_rw);
58144 + module_free(mod, mod->module_core_rw);
58145 }
58146
58147 static int post_relocation(struct module *mod, const struct load_info *info)
58148 @@ -2748,9 +2843,38 @@ static struct module *load_module(void _
58149 if (err)
58150 goto free_unload;
58151
58152 + /* Now copy in args */
58153 + mod->args = strndup_user(uargs, ~0UL >> 1);
58154 + if (IS_ERR(mod->args)) {
58155 + err = PTR_ERR(mod->args);
58156 + goto free_unload;
58157 + }
58158 +
58159 /* Set up MODINFO_ATTR fields */
58160 setup_modinfo(mod, &info);
58161
58162 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
58163 + {
58164 + char *p, *p2;
58165 +
58166 + if (strstr(mod->args, "grsec_modharden_netdev")) {
58167 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
58168 + err = -EPERM;
58169 + goto free_modinfo;
58170 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
58171 + p += strlen("grsec_modharden_normal");
58172 + p2 = strstr(p, "_");
58173 + if (p2) {
58174 + *p2 = '\0';
58175 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
58176 + *p2 = '_';
58177 + }
58178 + err = -EPERM;
58179 + goto free_modinfo;
58180 + }
58181 + }
58182 +#endif
58183 +
58184 /* Fix up syms, so that st_value is a pointer to location. */
58185 err = simplify_symbols(mod, &info);
58186 if (err < 0)
58187 @@ -2766,13 +2890,6 @@ static struct module *load_module(void _
58188
58189 flush_module_icache(mod);
58190
58191 - /* Now copy in args */
58192 - mod->args = strndup_user(uargs, ~0UL >> 1);
58193 - if (IS_ERR(mod->args)) {
58194 - err = PTR_ERR(mod->args);
58195 - goto free_arch_cleanup;
58196 - }
58197 -
58198 /* Mark state as coming so strong_try_module_get() ignores us. */
58199 mod->state = MODULE_STATE_COMING;
58200
58201 @@ -2832,11 +2949,10 @@ static struct module *load_module(void _
58202 unlock:
58203 mutex_unlock(&module_mutex);
58204 synchronize_sched();
58205 - kfree(mod->args);
58206 - free_arch_cleanup:
58207 module_arch_cleanup(mod);
58208 free_modinfo:
58209 free_modinfo(mod);
58210 + kfree(mod->args);
58211 free_unload:
58212 module_unload_free(mod);
58213 free_module:
58214 @@ -2877,16 +2993,16 @@ SYSCALL_DEFINE3(init_module, void __user
58215 MODULE_STATE_COMING, mod);
58216
58217 /* Set RO and NX regions for core */
58218 - set_section_ro_nx(mod->module_core,
58219 - mod->core_text_size,
58220 - mod->core_ro_size,
58221 - mod->core_size);
58222 + set_section_ro_nx(mod->module_core_rx,
58223 + mod->core_size_rx,
58224 + mod->core_size_rx,
58225 + mod->core_size_rx);
58226
58227 /* Set RO and NX regions for init */
58228 - set_section_ro_nx(mod->module_init,
58229 - mod->init_text_size,
58230 - mod->init_ro_size,
58231 - mod->init_size);
58232 + set_section_ro_nx(mod->module_init_rx,
58233 + mod->init_size_rx,
58234 + mod->init_size_rx,
58235 + mod->init_size_rx);
58236
58237 do_mod_ctors(mod);
58238 /* Start the module */
58239 @@ -2931,11 +3047,13 @@ SYSCALL_DEFINE3(init_module, void __user
58240 mod->symtab = mod->core_symtab;
58241 mod->strtab = mod->core_strtab;
58242 #endif
58243 - unset_section_ro_nx(mod, mod->module_init);
58244 - module_free(mod, mod->module_init);
58245 - mod->module_init = NULL;
58246 - mod->init_size = 0;
58247 - mod->init_text_size = 0;
58248 + unset_section_ro_nx(mod, mod->module_init_rx);
58249 + module_free(mod, mod->module_init_rw);
58250 + module_free_exec(mod, mod->module_init_rx);
58251 + mod->module_init_rw = NULL;
58252 + mod->module_init_rx = NULL;
58253 + mod->init_size_rw = 0;
58254 + mod->init_size_rx = 0;
58255 mutex_unlock(&module_mutex);
58256
58257 return 0;
58258 @@ -2966,10 +3084,16 @@ static const char *get_ksymbol(struct mo
58259 unsigned long nextval;
58260
58261 /* At worse, next value is at end of module */
58262 - if (within_module_init(addr, mod))
58263 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
58264 + if (within_module_init_rx(addr, mod))
58265 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58266 + else if (within_module_init_rw(addr, mod))
58267 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58268 + else if (within_module_core_rx(addr, mod))
58269 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58270 + else if (within_module_core_rw(addr, mod))
58271 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58272 else
58273 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
58274 + return NULL;
58275
58276 /* Scan for closest preceding symbol, and next symbol. (ELF
58277 starts real symbols at 1). */
58278 @@ -3215,7 +3339,7 @@ static int m_show(struct seq_file *m, vo
58279 char buf[8];
58280
58281 seq_printf(m, "%s %u",
58282 - mod->name, mod->init_size + mod->core_size);
58283 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58284 print_unload_info(m, mod);
58285
58286 /* Informative for users. */
58287 @@ -3224,7 +3348,7 @@ static int m_show(struct seq_file *m, vo
58288 mod->state == MODULE_STATE_COMING ? "Loading":
58289 "Live");
58290 /* Used by oprofile and other similar tools. */
58291 - seq_printf(m, " 0x%pK", mod->module_core);
58292 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58293
58294 /* Taints info */
58295 if (mod->taints)
58296 @@ -3260,7 +3384,17 @@ static const struct file_operations proc
58297
58298 static int __init proc_modules_init(void)
58299 {
58300 +#ifndef CONFIG_GRKERNSEC_HIDESYM
58301 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58302 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58303 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58304 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58305 +#else
58306 proc_create("modules", 0, NULL, &proc_modules_operations);
58307 +#endif
58308 +#else
58309 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58310 +#endif
58311 return 0;
58312 }
58313 module_init(proc_modules_init);
58314 @@ -3319,12 +3453,12 @@ struct module *__module_address(unsigned
58315 {
58316 struct module *mod;
58317
58318 - if (addr < module_addr_min || addr > module_addr_max)
58319 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58320 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
58321 return NULL;
58322
58323 list_for_each_entry_rcu(mod, &modules, list)
58324 - if (within_module_core(addr, mod)
58325 - || within_module_init(addr, mod))
58326 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
58327 return mod;
58328 return NULL;
58329 }
58330 @@ -3358,11 +3492,20 @@ bool is_module_text_address(unsigned lon
58331 */
58332 struct module *__module_text_address(unsigned long addr)
58333 {
58334 - struct module *mod = __module_address(addr);
58335 + struct module *mod;
58336 +
58337 +#ifdef CONFIG_X86_32
58338 + addr = ktla_ktva(addr);
58339 +#endif
58340 +
58341 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58342 + return NULL;
58343 +
58344 + mod = __module_address(addr);
58345 +
58346 if (mod) {
58347 /* Make sure it's within the text section. */
58348 - if (!within(addr, mod->module_init, mod->init_text_size)
58349 - && !within(addr, mod->module_core, mod->core_text_size))
58350 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58351 mod = NULL;
58352 }
58353 return mod;
58354 diff -urNp linux-2.6.39.4/kernel/mutex.c linux-2.6.39.4/kernel/mutex.c
58355 --- linux-2.6.39.4/kernel/mutex.c 2011-05-19 00:06:34.000000000 -0400
58356 +++ linux-2.6.39.4/kernel/mutex.c 2011-08-05 19:44:37.000000000 -0400
58357 @@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock,
58358 */
58359
58360 for (;;) {
58361 - struct thread_info *owner;
58362 + struct task_struct *owner;
58363
58364 /*
58365 * If we own the BKL, then don't spin. The owner of
58366 @@ -205,7 +205,7 @@ __mutex_lock_common(struct mutex *lock,
58367 spin_lock_mutex(&lock->wait_lock, flags);
58368
58369 debug_mutex_lock_common(lock, &waiter);
58370 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58371 + debug_mutex_add_waiter(lock, &waiter, task);
58372
58373 /* add waiting tasks to the end of the waitqueue (FIFO): */
58374 list_add_tail(&waiter.list, &lock->wait_list);
58375 @@ -234,8 +234,7 @@ __mutex_lock_common(struct mutex *lock,
58376 * TASK_UNINTERRUPTIBLE case.)
58377 */
58378 if (unlikely(signal_pending_state(state, task))) {
58379 - mutex_remove_waiter(lock, &waiter,
58380 - task_thread_info(task));
58381 + mutex_remove_waiter(lock, &waiter, task);
58382 mutex_release(&lock->dep_map, 1, ip);
58383 spin_unlock_mutex(&lock->wait_lock, flags);
58384
58385 @@ -256,7 +255,7 @@ __mutex_lock_common(struct mutex *lock,
58386 done:
58387 lock_acquired(&lock->dep_map, ip);
58388 /* got the lock - rejoice! */
58389 - mutex_remove_waiter(lock, &waiter, current_thread_info());
58390 + mutex_remove_waiter(lock, &waiter, task);
58391 mutex_set_owner(lock);
58392
58393 /* set it to 0 if there are no waiters left: */
58394 diff -urNp linux-2.6.39.4/kernel/mutex-debug.c linux-2.6.39.4/kernel/mutex-debug.c
58395 --- linux-2.6.39.4/kernel/mutex-debug.c 2011-05-19 00:06:34.000000000 -0400
58396 +++ linux-2.6.39.4/kernel/mutex-debug.c 2011-08-05 19:44:37.000000000 -0400
58397 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58398 }
58399
58400 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58401 - struct thread_info *ti)
58402 + struct task_struct *task)
58403 {
58404 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58405
58406 /* Mark the current thread as blocked on the lock: */
58407 - ti->task->blocked_on = waiter;
58408 + task->blocked_on = waiter;
58409 }
58410
58411 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58412 - struct thread_info *ti)
58413 + struct task_struct *task)
58414 {
58415 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58416 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58417 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58418 - ti->task->blocked_on = NULL;
58419 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
58420 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58421 + task->blocked_on = NULL;
58422
58423 list_del_init(&waiter->list);
58424 waiter->task = NULL;
58425 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
58426 return;
58427
58428 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
58429 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
58430 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
58431 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
58432 mutex_clear_owner(lock);
58433 }
58434 diff -urNp linux-2.6.39.4/kernel/mutex-debug.h linux-2.6.39.4/kernel/mutex-debug.h
58435 --- linux-2.6.39.4/kernel/mutex-debug.h 2011-05-19 00:06:34.000000000 -0400
58436 +++ linux-2.6.39.4/kernel/mutex-debug.h 2011-08-05 19:44:37.000000000 -0400
58437 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
58438 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58439 extern void debug_mutex_add_waiter(struct mutex *lock,
58440 struct mutex_waiter *waiter,
58441 - struct thread_info *ti);
58442 + struct task_struct *task);
58443 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58444 - struct thread_info *ti);
58445 + struct task_struct *task);
58446 extern void debug_mutex_unlock(struct mutex *lock);
58447 extern void debug_mutex_init(struct mutex *lock, const char *name,
58448 struct lock_class_key *key);
58449
58450 static inline void mutex_set_owner(struct mutex *lock)
58451 {
58452 - lock->owner = current_thread_info();
58453 + lock->owner = current;
58454 }
58455
58456 static inline void mutex_clear_owner(struct mutex *lock)
58457 diff -urNp linux-2.6.39.4/kernel/mutex.h linux-2.6.39.4/kernel/mutex.h
58458 --- linux-2.6.39.4/kernel/mutex.h 2011-05-19 00:06:34.000000000 -0400
58459 +++ linux-2.6.39.4/kernel/mutex.h 2011-08-05 19:44:37.000000000 -0400
58460 @@ -19,7 +19,7 @@
58461 #ifdef CONFIG_SMP
58462 static inline void mutex_set_owner(struct mutex *lock)
58463 {
58464 - lock->owner = current_thread_info();
58465 + lock->owner = current;
58466 }
58467
58468 static inline void mutex_clear_owner(struct mutex *lock)
58469 diff -urNp linux-2.6.39.4/kernel/padata.c linux-2.6.39.4/kernel/padata.c
58470 --- linux-2.6.39.4/kernel/padata.c 2011-05-19 00:06:34.000000000 -0400
58471 +++ linux-2.6.39.4/kernel/padata.c 2011-08-05 19:44:37.000000000 -0400
58472 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58473 padata->pd = pd;
58474 padata->cb_cpu = cb_cpu;
58475
58476 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58477 - atomic_set(&pd->seq_nr, -1);
58478 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58479 + atomic_set_unchecked(&pd->seq_nr, -1);
58480
58481 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58482 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58483
58484 target_cpu = padata_cpu_hash(padata);
58485 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58486 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58487 padata_init_pqueues(pd);
58488 padata_init_squeues(pd);
58489 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58490 - atomic_set(&pd->seq_nr, -1);
58491 + atomic_set_unchecked(&pd->seq_nr, -1);
58492 atomic_set(&pd->reorder_objects, 0);
58493 atomic_set(&pd->refcnt, 0);
58494 pd->pinst = pinst;
58495 diff -urNp linux-2.6.39.4/kernel/panic.c linux-2.6.39.4/kernel/panic.c
58496 --- linux-2.6.39.4/kernel/panic.c 2011-05-19 00:06:34.000000000 -0400
58497 +++ linux-2.6.39.4/kernel/panic.c 2011-08-05 19:44:37.000000000 -0400
58498 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58499 const char *board;
58500
58501 printk(KERN_WARNING "------------[ cut here ]------------\n");
58502 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58503 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58504 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58505 if (board)
58506 printk(KERN_WARNING "Hardware name: %s\n", board);
58507 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58508 */
58509 void __stack_chk_fail(void)
58510 {
58511 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58512 + dump_stack();
58513 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58514 __builtin_return_address(0));
58515 }
58516 EXPORT_SYMBOL(__stack_chk_fail);
58517 diff -urNp linux-2.6.39.4/kernel/perf_event.c linux-2.6.39.4/kernel/perf_event.c
58518 --- linux-2.6.39.4/kernel/perf_event.c 2011-05-19 00:06:34.000000000 -0400
58519 +++ linux-2.6.39.4/kernel/perf_event.c 2011-08-05 20:34:06.000000000 -0400
58520 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
58521 return 0;
58522 }
58523
58524 -static atomic64_t perf_event_id;
58525 +static atomic64_unchecked_t perf_event_id;
58526
58527 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
58528 enum event_type_t event_type);
58529 @@ -2496,7 +2496,7 @@ static void __perf_event_read(void *info
58530
58531 static inline u64 perf_event_count(struct perf_event *event)
58532 {
58533 - return local64_read(&event->count) + atomic64_read(&event->child_count);
58534 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
58535 }
58536
58537 static u64 perf_event_read(struct perf_event *event)
58538 @@ -3031,9 +3031,9 @@ u64 perf_event_read_value(struct perf_ev
58539 mutex_lock(&event->child_mutex);
58540 total += perf_event_read(event);
58541 *enabled += event->total_time_enabled +
58542 - atomic64_read(&event->child_total_time_enabled);
58543 + atomic64_read_unchecked(&event->child_total_time_enabled);
58544 *running += event->total_time_running +
58545 - atomic64_read(&event->child_total_time_running);
58546 + atomic64_read_unchecked(&event->child_total_time_running);
58547
58548 list_for_each_entry(child, &event->child_list, child_list) {
58549 total += perf_event_read(child);
58550 @@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct p
58551 userpg->offset -= local64_read(&event->hw.prev_count);
58552
58553 userpg->time_enabled = event->total_time_enabled +
58554 - atomic64_read(&event->child_total_time_enabled);
58555 + atomic64_read_unchecked(&event->child_total_time_enabled);
58556
58557 userpg->time_running = event->total_time_running +
58558 - atomic64_read(&event->child_total_time_running);
58559 + atomic64_read_unchecked(&event->child_total_time_running);
58560
58561 barrier();
58562 ++userpg->lock;
58563 @@ -4196,11 +4196,11 @@ static void perf_output_read_one(struct
58564 values[n++] = perf_event_count(event);
58565 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
58566 values[n++] = enabled +
58567 - atomic64_read(&event->child_total_time_enabled);
58568 + atomic64_read_unchecked(&event->child_total_time_enabled);
58569 }
58570 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
58571 values[n++] = running +
58572 - atomic64_read(&event->child_total_time_running);
58573 + atomic64_read_unchecked(&event->child_total_time_running);
58574 }
58575 if (read_format & PERF_FORMAT_ID)
58576 values[n++] = primary_event_id(event);
58577 @@ -6201,7 +6201,7 @@ perf_event_alloc(struct perf_event_attr
58578 event->parent = parent_event;
58579
58580 event->ns = get_pid_ns(current->nsproxy->pid_ns);
58581 - event->id = atomic64_inc_return(&perf_event_id);
58582 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
58583
58584 event->state = PERF_EVENT_STATE_INACTIVE;
58585
58586 @@ -6724,10 +6724,10 @@ static void sync_child_event(struct perf
58587 /*
58588 * Add back the child's count to the parent's count:
58589 */
58590 - atomic64_add(child_val, &parent_event->child_count);
58591 - atomic64_add(child_event->total_time_enabled,
58592 + atomic64_add_unchecked(child_val, &parent_event->child_count);
58593 + atomic64_add_unchecked(child_event->total_time_enabled,
58594 &parent_event->child_total_time_enabled);
58595 - atomic64_add(child_event->total_time_running,
58596 + atomic64_add_unchecked(child_event->total_time_running,
58597 &parent_event->child_total_time_running);
58598
58599 /*
58600 diff -urNp linux-2.6.39.4/kernel/pid.c linux-2.6.39.4/kernel/pid.c
58601 --- linux-2.6.39.4/kernel/pid.c 2011-05-19 00:06:34.000000000 -0400
58602 +++ linux-2.6.39.4/kernel/pid.c 2011-08-05 19:44:37.000000000 -0400
58603 @@ -33,6 +33,7 @@
58604 #include <linux/rculist.h>
58605 #include <linux/bootmem.h>
58606 #include <linux/hash.h>
58607 +#include <linux/security.h>
58608 #include <linux/pid_namespace.h>
58609 #include <linux/init_task.h>
58610 #include <linux/syscalls.h>
58611 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58612
58613 int pid_max = PID_MAX_DEFAULT;
58614
58615 -#define RESERVED_PIDS 300
58616 +#define RESERVED_PIDS 500
58617
58618 int pid_max_min = RESERVED_PIDS + 1;
58619 int pid_max_max = PID_MAX_LIMIT;
58620 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58621 */
58622 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58623 {
58624 + struct task_struct *task;
58625 +
58626 rcu_lockdep_assert(rcu_read_lock_held());
58627 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58628 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58629 +
58630 + if (gr_pid_is_chrooted(task))
58631 + return NULL;
58632 +
58633 + return task;
58634 }
58635
58636 struct task_struct *find_task_by_vpid(pid_t vnr)
58637 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58638 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58639 }
58640
58641 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58642 +{
58643 + rcu_lockdep_assert(rcu_read_lock_held());
58644 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58645 +}
58646 +
58647 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58648 {
58649 struct pid *pid;
58650 diff -urNp linux-2.6.39.4/kernel/posix-cpu-timers.c linux-2.6.39.4/kernel/posix-cpu-timers.c
58651 --- linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-05-19 00:06:34.000000000 -0400
58652 +++ linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-08-06 09:34:48.000000000 -0400
58653 @@ -6,6 +6,7 @@
58654 #include <linux/posix-timers.h>
58655 #include <linux/errno.h>
58656 #include <linux/math64.h>
58657 +#include <linux/security.h>
58658 #include <asm/uaccess.h>
58659 #include <linux/kernel_stat.h>
58660 #include <trace/events/timer.h>
58661 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58662
58663 static __init int init_posix_cpu_timers(void)
58664 {
58665 - struct k_clock process = {
58666 + static struct k_clock process = {
58667 .clock_getres = process_cpu_clock_getres,
58668 .clock_get = process_cpu_clock_get,
58669 .timer_create = process_cpu_timer_create,
58670 .nsleep = process_cpu_nsleep,
58671 .nsleep_restart = process_cpu_nsleep_restart,
58672 };
58673 - struct k_clock thread = {
58674 + static struct k_clock thread = {
58675 .clock_getres = thread_cpu_clock_getres,
58676 .clock_get = thread_cpu_clock_get,
58677 .timer_create = thread_cpu_timer_create,
58678 diff -urNp linux-2.6.39.4/kernel/posix-timers.c linux-2.6.39.4/kernel/posix-timers.c
58679 --- linux-2.6.39.4/kernel/posix-timers.c 2011-05-19 00:06:34.000000000 -0400
58680 +++ linux-2.6.39.4/kernel/posix-timers.c 2011-08-06 09:30:46.000000000 -0400
58681 @@ -43,6 +43,7 @@
58682 #include <linux/idr.h>
58683 #include <linux/posix-clock.h>
58684 #include <linux/posix-timers.h>
58685 +#include <linux/grsecurity.h>
58686 #include <linux/syscalls.h>
58687 #include <linux/wait.h>
58688 #include <linux/workqueue.h>
58689 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58690 * which we beg off on and pass to do_sys_settimeofday().
58691 */
58692
58693 -static struct k_clock posix_clocks[MAX_CLOCKS];
58694 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58695
58696 /*
58697 * These ones are defined below.
58698 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58699 */
58700 static __init int init_posix_timers(void)
58701 {
58702 - struct k_clock clock_realtime = {
58703 + static struct k_clock clock_realtime = {
58704 .clock_getres = hrtimer_get_res,
58705 .clock_get = posix_clock_realtime_get,
58706 .clock_set = posix_clock_realtime_set,
58707 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58708 .timer_get = common_timer_get,
58709 .timer_del = common_timer_del,
58710 };
58711 - struct k_clock clock_monotonic = {
58712 + static struct k_clock clock_monotonic = {
58713 .clock_getres = hrtimer_get_res,
58714 .clock_get = posix_ktime_get_ts,
58715 .nsleep = common_nsleep,
58716 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58717 .timer_get = common_timer_get,
58718 .timer_del = common_timer_del,
58719 };
58720 - struct k_clock clock_monotonic_raw = {
58721 + static struct k_clock clock_monotonic_raw = {
58722 .clock_getres = hrtimer_get_res,
58723 .clock_get = posix_get_monotonic_raw,
58724 };
58725 - struct k_clock clock_realtime_coarse = {
58726 + static struct k_clock clock_realtime_coarse = {
58727 .clock_getres = posix_get_coarse_res,
58728 .clock_get = posix_get_realtime_coarse,
58729 };
58730 - struct k_clock clock_monotonic_coarse = {
58731 + static struct k_clock clock_monotonic_coarse = {
58732 .clock_getres = posix_get_coarse_res,
58733 .clock_get = posix_get_monotonic_coarse,
58734 };
58735 - struct k_clock clock_boottime = {
58736 + static struct k_clock clock_boottime = {
58737 .clock_getres = hrtimer_get_res,
58738 .clock_get = posix_get_boottime,
58739 .nsleep = common_nsleep,
58740 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58741 .timer_del = common_timer_del,
58742 };
58743
58744 + pax_track_stack();
58745 +
58746 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58747 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58748 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58749 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58750 return;
58751 }
58752
58753 - posix_clocks[clock_id] = *new_clock;
58754 + posix_clocks[clock_id] = new_clock;
58755 }
58756 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58757
58758 @@ -512,9 +515,9 @@ static struct k_clock *clockid_to_kclock
58759 return (id & CLOCKFD_MASK) == CLOCKFD ?
58760 &clock_posix_dynamic : &clock_posix_cpu;
58761
58762 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58763 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58764 return NULL;
58765 - return &posix_clocks[id];
58766 + return posix_clocks[id];
58767 }
58768
58769 static int common_timer_create(struct k_itimer *new_timer)
58770 @@ -956,6 +959,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58771 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58772 return -EFAULT;
58773
58774 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58775 + have their clock_set fptr set to a nosettime dummy function
58776 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58777 + call common_clock_set, which calls do_sys_settimeofday, which
58778 + we hook
58779 + */
58780 +
58781 return kc->clock_set(which_clock, &new_tp);
58782 }
58783
58784 diff -urNp linux-2.6.39.4/kernel/power/poweroff.c linux-2.6.39.4/kernel/power/poweroff.c
58785 --- linux-2.6.39.4/kernel/power/poweroff.c 2011-05-19 00:06:34.000000000 -0400
58786 +++ linux-2.6.39.4/kernel/power/poweroff.c 2011-08-05 19:44:37.000000000 -0400
58787 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58788 .enable_mask = SYSRQ_ENABLE_BOOT,
58789 };
58790
58791 -static int pm_sysrq_init(void)
58792 +static int __init pm_sysrq_init(void)
58793 {
58794 register_sysrq_key('o', &sysrq_poweroff_op);
58795 return 0;
58796 diff -urNp linux-2.6.39.4/kernel/power/process.c linux-2.6.39.4/kernel/power/process.c
58797 --- linux-2.6.39.4/kernel/power/process.c 2011-05-19 00:06:34.000000000 -0400
58798 +++ linux-2.6.39.4/kernel/power/process.c 2011-08-05 19:44:37.000000000 -0400
58799 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58800 u64 elapsed_csecs64;
58801 unsigned int elapsed_csecs;
58802 bool wakeup = false;
58803 + bool timedout = false;
58804
58805 do_gettimeofday(&start);
58806
58807 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58808
58809 while (true) {
58810 todo = 0;
58811 + if (time_after(jiffies, end_time))
58812 + timedout = true;
58813 read_lock(&tasklist_lock);
58814 do_each_thread(g, p) {
58815 if (frozen(p) || !freezable(p))
58816 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58817 * try_to_stop() after schedule() in ptrace/signal
58818 * stop sees TIF_FREEZE.
58819 */
58820 - if (!task_is_stopped_or_traced(p) &&
58821 - !freezer_should_skip(p))
58822 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58823 todo++;
58824 + if (timedout) {
58825 + printk(KERN_ERR "Task refusing to freeze:\n");
58826 + sched_show_task(p);
58827 + }
58828 + }
58829 } while_each_thread(g, p);
58830 read_unlock(&tasklist_lock);
58831
58832 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58833 todo += wq_busy;
58834 }
58835
58836 - if (!todo || time_after(jiffies, end_time))
58837 + if (!todo || timedout)
58838 break;
58839
58840 if (pm_wakeup_pending()) {
58841 diff -urNp linux-2.6.39.4/kernel/printk.c linux-2.6.39.4/kernel/printk.c
58842 --- linux-2.6.39.4/kernel/printk.c 2011-05-19 00:06:34.000000000 -0400
58843 +++ linux-2.6.39.4/kernel/printk.c 2011-08-05 19:44:37.000000000 -0400
58844 @@ -284,12 +284,17 @@ static int check_syslog_permissions(int
58845 if (from_file && type != SYSLOG_ACTION_OPEN)
58846 return 0;
58847
58848 +#ifdef CONFIG_GRKERNSEC_DMESG
58849 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58850 + return -EPERM;
58851 +#endif
58852 +
58853 if (syslog_action_restricted(type)) {
58854 if (capable(CAP_SYSLOG))
58855 return 0;
58856 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58857 if (capable(CAP_SYS_ADMIN)) {
58858 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58859 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58860 "but no CAP_SYSLOG (deprecated).\n");
58861 return 0;
58862 }
58863 diff -urNp linux-2.6.39.4/kernel/profile.c linux-2.6.39.4/kernel/profile.c
58864 --- linux-2.6.39.4/kernel/profile.c 2011-05-19 00:06:34.000000000 -0400
58865 +++ linux-2.6.39.4/kernel/profile.c 2011-08-05 19:44:37.000000000 -0400
58866 @@ -39,7 +39,7 @@ struct profile_hit {
58867 /* Oprofile timer tick hook */
58868 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58869
58870 -static atomic_t *prof_buffer;
58871 +static atomic_unchecked_t *prof_buffer;
58872 static unsigned long prof_len, prof_shift;
58873
58874 int prof_on __read_mostly;
58875 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
58876 hits[i].pc = 0;
58877 continue;
58878 }
58879 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58880 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58881 hits[i].hits = hits[i].pc = 0;
58882 }
58883 }
58884 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
58885 * Add the current hit(s) and flush the write-queue out
58886 * to the global buffer:
58887 */
58888 - atomic_add(nr_hits, &prof_buffer[pc]);
58889 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58890 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58891 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58892 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58893 hits[i].pc = hits[i].hits = 0;
58894 }
58895 out:
58896 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
58897 if (prof_on != type || !prof_buffer)
58898 return;
58899 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58900 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58901 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58902 }
58903 #endif /* !CONFIG_SMP */
58904 EXPORT_SYMBOL_GPL(profile_hits);
58905 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58906 return -EFAULT;
58907 buf++; p++; count--; read++;
58908 }
58909 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58910 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58911 if (copy_to_user(buf, (void *)pnt, count))
58912 return -EFAULT;
58913 read += count;
58914 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58915 }
58916 #endif
58917 profile_discard_flip_buffers();
58918 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58919 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58920 return count;
58921 }
58922
58923 diff -urNp linux-2.6.39.4/kernel/ptrace.c linux-2.6.39.4/kernel/ptrace.c
58924 --- linux-2.6.39.4/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
58925 +++ linux-2.6.39.4/kernel/ptrace.c 2011-08-05 19:44:37.000000000 -0400
58926 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
58927 return ret;
58928 }
58929
58930 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58931 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58932 + unsigned int log)
58933 {
58934 const struct cred *cred = current_cred(), *tcred;
58935
58936 @@ -143,7 +144,8 @@ int __ptrace_may_access(struct task_stru
58937 cred->gid == tcred->sgid &&
58938 cred->gid == tcred->gid))
58939 goto ok;
58940 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58941 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58942 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58943 goto ok;
58944 rcu_read_unlock();
58945 return -EPERM;
58946 @@ -152,7 +154,9 @@ ok:
58947 smp_rmb();
58948 if (task->mm)
58949 dumpable = get_dumpable(task->mm);
58950 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58951 + if (!dumpable &&
58952 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58953 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58954 return -EPERM;
58955
58956 return security_ptrace_access_check(task, mode);
58957 @@ -162,7 +166,16 @@ bool ptrace_may_access(struct task_struc
58958 {
58959 int err;
58960 task_lock(task);
58961 - err = __ptrace_may_access(task, mode);
58962 + err = __ptrace_may_access(task, mode, 0);
58963 + task_unlock(task);
58964 + return !err;
58965 +}
58966 +
58967 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58968 +{
58969 + int err;
58970 + task_lock(task);
58971 + err = __ptrace_may_access(task, mode, 1);
58972 task_unlock(task);
58973 return !err;
58974 }
58975 @@ -189,7 +202,7 @@ static int ptrace_attach(struct task_str
58976 goto out;
58977
58978 task_lock(task);
58979 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58980 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58981 task_unlock(task);
58982 if (retval)
58983 goto unlock_creds;
58984 @@ -202,7 +215,7 @@ static int ptrace_attach(struct task_str
58985 goto unlock_tasklist;
58986
58987 task->ptrace = PT_PTRACED;
58988 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58989 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58990 task->ptrace |= PT_PTRACE_CAP;
58991
58992 __ptrace_link(task, current);
58993 @@ -362,6 +375,8 @@ int ptrace_readdata(struct task_struct *
58994 {
58995 int copied = 0;
58996
58997 + pax_track_stack();
58998 +
58999 while (len > 0) {
59000 char buf[128];
59001 int this_len, retval;
59002 @@ -373,7 +388,7 @@ int ptrace_readdata(struct task_struct *
59003 break;
59004 return -EIO;
59005 }
59006 - if (copy_to_user(dst, buf, retval))
59007 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
59008 return -EFAULT;
59009 copied += retval;
59010 src += retval;
59011 @@ -387,6 +402,8 @@ int ptrace_writedata(struct task_struct
59012 {
59013 int copied = 0;
59014
59015 + pax_track_stack();
59016 +
59017 while (len > 0) {
59018 char buf[128];
59019 int this_len, retval;
59020 @@ -569,9 +586,11 @@ int ptrace_request(struct task_struct *c
59021 {
59022 int ret = -EIO;
59023 siginfo_t siginfo;
59024 - void __user *datavp = (void __user *) data;
59025 + void __user *datavp = (__force void __user *) data;
59026 unsigned long __user *datalp = datavp;
59027
59028 + pax_track_stack();
59029 +
59030 switch (request) {
59031 case PTRACE_PEEKTEXT:
59032 case PTRACE_PEEKDATA:
59033 @@ -717,14 +736,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
59034 goto out;
59035 }
59036
59037 + if (gr_handle_ptrace(child, request)) {
59038 + ret = -EPERM;
59039 + goto out_put_task_struct;
59040 + }
59041 +
59042 if (request == PTRACE_ATTACH) {
59043 ret = ptrace_attach(child);
59044 /*
59045 * Some architectures need to do book-keeping after
59046 * a ptrace attach.
59047 */
59048 - if (!ret)
59049 + if (!ret) {
59050 arch_ptrace_attach(child);
59051 + gr_audit_ptrace(child);
59052 + }
59053 goto out_put_task_struct;
59054 }
59055
59056 @@ -749,7 +775,7 @@ int generic_ptrace_peekdata(struct task_
59057 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
59058 if (copied != sizeof(tmp))
59059 return -EIO;
59060 - return put_user(tmp, (unsigned long __user *)data);
59061 + return put_user(tmp, (__force unsigned long __user *)data);
59062 }
59063
59064 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
59065 @@ -772,6 +798,8 @@ int compat_ptrace_request(struct task_st
59066 siginfo_t siginfo;
59067 int ret;
59068
59069 + pax_track_stack();
59070 +
59071 switch (request) {
59072 case PTRACE_PEEKTEXT:
59073 case PTRACE_PEEKDATA:
59074 @@ -859,14 +887,21 @@ asmlinkage long compat_sys_ptrace(compat
59075 goto out;
59076 }
59077
59078 + if (gr_handle_ptrace(child, request)) {
59079 + ret = -EPERM;
59080 + goto out_put_task_struct;
59081 + }
59082 +
59083 if (request == PTRACE_ATTACH) {
59084 ret = ptrace_attach(child);
59085 /*
59086 * Some architectures need to do book-keeping after
59087 * a ptrace attach.
59088 */
59089 - if (!ret)
59090 + if (!ret) {
59091 arch_ptrace_attach(child);
59092 + gr_audit_ptrace(child);
59093 + }
59094 goto out_put_task_struct;
59095 }
59096
59097 diff -urNp linux-2.6.39.4/kernel/rcutorture.c linux-2.6.39.4/kernel/rcutorture.c
59098 --- linux-2.6.39.4/kernel/rcutorture.c 2011-05-19 00:06:34.000000000 -0400
59099 +++ linux-2.6.39.4/kernel/rcutorture.c 2011-08-05 19:44:37.000000000 -0400
59100 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
59101 { 0 };
59102 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
59103 { 0 };
59104 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59105 -static atomic_t n_rcu_torture_alloc;
59106 -static atomic_t n_rcu_torture_alloc_fail;
59107 -static atomic_t n_rcu_torture_free;
59108 -static atomic_t n_rcu_torture_mberror;
59109 -static atomic_t n_rcu_torture_error;
59110 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59111 +static atomic_unchecked_t n_rcu_torture_alloc;
59112 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
59113 +static atomic_unchecked_t n_rcu_torture_free;
59114 +static atomic_unchecked_t n_rcu_torture_mberror;
59115 +static atomic_unchecked_t n_rcu_torture_error;
59116 static long n_rcu_torture_boost_ktrerror;
59117 static long n_rcu_torture_boost_rterror;
59118 static long n_rcu_torture_boost_allocerror;
59119 @@ -225,11 +225,11 @@ rcu_torture_alloc(void)
59120
59121 spin_lock_bh(&rcu_torture_lock);
59122 if (list_empty(&rcu_torture_freelist)) {
59123 - atomic_inc(&n_rcu_torture_alloc_fail);
59124 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
59125 spin_unlock_bh(&rcu_torture_lock);
59126 return NULL;
59127 }
59128 - atomic_inc(&n_rcu_torture_alloc);
59129 + atomic_inc_unchecked(&n_rcu_torture_alloc);
59130 p = rcu_torture_freelist.next;
59131 list_del_init(p);
59132 spin_unlock_bh(&rcu_torture_lock);
59133 @@ -242,7 +242,7 @@ rcu_torture_alloc(void)
59134 static void
59135 rcu_torture_free(struct rcu_torture *p)
59136 {
59137 - atomic_inc(&n_rcu_torture_free);
59138 + atomic_inc_unchecked(&n_rcu_torture_free);
59139 spin_lock_bh(&rcu_torture_lock);
59140 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
59141 spin_unlock_bh(&rcu_torture_lock);
59142 @@ -362,7 +362,7 @@ rcu_torture_cb(struct rcu_head *p)
59143 i = rp->rtort_pipe_count;
59144 if (i > RCU_TORTURE_PIPE_LEN)
59145 i = RCU_TORTURE_PIPE_LEN;
59146 - atomic_inc(&rcu_torture_wcount[i]);
59147 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59148 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59149 rp->rtort_mbtest = 0;
59150 rcu_torture_free(rp);
59151 @@ -409,7 +409,7 @@ static void rcu_sync_torture_deferred_fr
59152 i = rp->rtort_pipe_count;
59153 if (i > RCU_TORTURE_PIPE_LEN)
59154 i = RCU_TORTURE_PIPE_LEN;
59155 - atomic_inc(&rcu_torture_wcount[i]);
59156 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59157 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59158 rp->rtort_mbtest = 0;
59159 list_del(&rp->rtort_free);
59160 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
59161 i = old_rp->rtort_pipe_count;
59162 if (i > RCU_TORTURE_PIPE_LEN)
59163 i = RCU_TORTURE_PIPE_LEN;
59164 - atomic_inc(&rcu_torture_wcount[i]);
59165 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59166 old_rp->rtort_pipe_count++;
59167 cur_ops->deferred_free(old_rp);
59168 }
59169 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
59170 return;
59171 }
59172 if (p->rtort_mbtest == 0)
59173 - atomic_inc(&n_rcu_torture_mberror);
59174 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59175 spin_lock(&rand_lock);
59176 cur_ops->read_delay(&rand);
59177 n_rcu_torture_timers++;
59178 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
59179 continue;
59180 }
59181 if (p->rtort_mbtest == 0)
59182 - atomic_inc(&n_rcu_torture_mberror);
59183 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59184 cur_ops->read_delay(&rand);
59185 preempt_disable();
59186 pipe_count = p->rtort_pipe_count;
59187 @@ -1072,10 +1072,10 @@ rcu_torture_printk(char *page)
59188 rcu_torture_current,
59189 rcu_torture_current_version,
59190 list_empty(&rcu_torture_freelist),
59191 - atomic_read(&n_rcu_torture_alloc),
59192 - atomic_read(&n_rcu_torture_alloc_fail),
59193 - atomic_read(&n_rcu_torture_free),
59194 - atomic_read(&n_rcu_torture_mberror),
59195 + atomic_read_unchecked(&n_rcu_torture_alloc),
59196 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
59197 + atomic_read_unchecked(&n_rcu_torture_free),
59198 + atomic_read_unchecked(&n_rcu_torture_mberror),
59199 n_rcu_torture_boost_ktrerror,
59200 n_rcu_torture_boost_rterror,
59201 n_rcu_torture_boost_allocerror,
59202 @@ -1083,7 +1083,7 @@ rcu_torture_printk(char *page)
59203 n_rcu_torture_boost_failure,
59204 n_rcu_torture_boosts,
59205 n_rcu_torture_timers);
59206 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
59207 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
59208 n_rcu_torture_boost_ktrerror != 0 ||
59209 n_rcu_torture_boost_rterror != 0 ||
59210 n_rcu_torture_boost_allocerror != 0 ||
59211 @@ -1093,7 +1093,7 @@ rcu_torture_printk(char *page)
59212 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
59213 if (i > 1) {
59214 cnt += sprintf(&page[cnt], "!!! ");
59215 - atomic_inc(&n_rcu_torture_error);
59216 + atomic_inc_unchecked(&n_rcu_torture_error);
59217 WARN_ON_ONCE(1);
59218 }
59219 cnt += sprintf(&page[cnt], "Reader Pipe: ");
59220 @@ -1107,7 +1107,7 @@ rcu_torture_printk(char *page)
59221 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
59222 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59223 cnt += sprintf(&page[cnt], " %d",
59224 - atomic_read(&rcu_torture_wcount[i]));
59225 + atomic_read_unchecked(&rcu_torture_wcount[i]));
59226 }
59227 cnt += sprintf(&page[cnt], "\n");
59228 if (cur_ops->stats)
59229 @@ -1415,7 +1415,7 @@ rcu_torture_cleanup(void)
59230
59231 if (cur_ops->cleanup)
59232 cur_ops->cleanup();
59233 - if (atomic_read(&n_rcu_torture_error))
59234 + if (atomic_read_unchecked(&n_rcu_torture_error))
59235 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
59236 else
59237 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
59238 @@ -1479,11 +1479,11 @@ rcu_torture_init(void)
59239
59240 rcu_torture_current = NULL;
59241 rcu_torture_current_version = 0;
59242 - atomic_set(&n_rcu_torture_alloc, 0);
59243 - atomic_set(&n_rcu_torture_alloc_fail, 0);
59244 - atomic_set(&n_rcu_torture_free, 0);
59245 - atomic_set(&n_rcu_torture_mberror, 0);
59246 - atomic_set(&n_rcu_torture_error, 0);
59247 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
59248 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
59249 + atomic_set_unchecked(&n_rcu_torture_free, 0);
59250 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
59251 + atomic_set_unchecked(&n_rcu_torture_error, 0);
59252 n_rcu_torture_boost_ktrerror = 0;
59253 n_rcu_torture_boost_rterror = 0;
59254 n_rcu_torture_boost_allocerror = 0;
59255 @@ -1491,7 +1491,7 @@ rcu_torture_init(void)
59256 n_rcu_torture_boost_failure = 0;
59257 n_rcu_torture_boosts = 0;
59258 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
59259 - atomic_set(&rcu_torture_wcount[i], 0);
59260 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
59261 for_each_possible_cpu(cpu) {
59262 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59263 per_cpu(rcu_torture_count, cpu)[i] = 0;
59264 diff -urNp linux-2.6.39.4/kernel/rcutree.c linux-2.6.39.4/kernel/rcutree.c
59265 --- linux-2.6.39.4/kernel/rcutree.c 2011-05-19 00:06:34.000000000 -0400
59266 +++ linux-2.6.39.4/kernel/rcutree.c 2011-08-05 19:44:37.000000000 -0400
59267 @@ -1389,7 +1389,7 @@ __rcu_process_callbacks(struct rcu_state
59268 /*
59269 * Do softirq processing for the current CPU.
59270 */
59271 -static void rcu_process_callbacks(struct softirq_action *unused)
59272 +static void rcu_process_callbacks(void)
59273 {
59274 /*
59275 * Memory references from any prior RCU read-side critical sections
59276 diff -urNp linux-2.6.39.4/kernel/rcutree_plugin.h linux-2.6.39.4/kernel/rcutree_plugin.h
59277 --- linux-2.6.39.4/kernel/rcutree_plugin.h 2011-05-19 00:06:34.000000000 -0400
59278 +++ linux-2.6.39.4/kernel/rcutree_plugin.h 2011-08-05 19:44:37.000000000 -0400
59279 @@ -730,7 +730,7 @@ void synchronize_rcu_expedited(void)
59280
59281 /* Clean up and exit. */
59282 smp_mb(); /* ensure expedited GP seen before counter increment. */
59283 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
59284 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
59285 unlock_mb_ret:
59286 mutex_unlock(&sync_rcu_preempt_exp_mutex);
59287 mb_ret:
59288 @@ -1025,8 +1025,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
59289
59290 #else /* #ifndef CONFIG_SMP */
59291
59292 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
59293 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
59294 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
59295 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
59296
59297 static int synchronize_sched_expedited_cpu_stop(void *data)
59298 {
59299 @@ -1081,7 +1081,7 @@ void synchronize_sched_expedited(void)
59300 int firstsnap, s, snap, trycount = 0;
59301
59302 /* Note that atomic_inc_return() implies full memory barrier. */
59303 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
59304 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
59305 get_online_cpus();
59306
59307 /*
59308 @@ -1102,7 +1102,7 @@ void synchronize_sched_expedited(void)
59309 }
59310
59311 /* Check to see if someone else did our work for us. */
59312 - s = atomic_read(&sync_sched_expedited_done);
59313 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59314 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
59315 smp_mb(); /* ensure test happens before caller kfree */
59316 return;
59317 @@ -1117,7 +1117,7 @@ void synchronize_sched_expedited(void)
59318 * grace period works for us.
59319 */
59320 get_online_cpus();
59321 - snap = atomic_read(&sync_sched_expedited_started) - 1;
59322 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
59323 smp_mb(); /* ensure read is before try_stop_cpus(). */
59324 }
59325
59326 @@ -1128,12 +1128,12 @@ void synchronize_sched_expedited(void)
59327 * than we did beat us to the punch.
59328 */
59329 do {
59330 - s = atomic_read(&sync_sched_expedited_done);
59331 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59332 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59333 smp_mb(); /* ensure test happens before caller kfree */
59334 break;
59335 }
59336 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59337 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59338
59339 put_online_cpus();
59340 }
59341 diff -urNp linux-2.6.39.4/kernel/relay.c linux-2.6.39.4/kernel/relay.c
59342 --- linux-2.6.39.4/kernel/relay.c 2011-05-19 00:06:34.000000000 -0400
59343 +++ linux-2.6.39.4/kernel/relay.c 2011-08-05 19:44:37.000000000 -0400
59344 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59345 };
59346 ssize_t ret;
59347
59348 + pax_track_stack();
59349 +
59350 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59351 return 0;
59352 if (splice_grow_spd(pipe, &spd))
59353 diff -urNp linux-2.6.39.4/kernel/resource.c linux-2.6.39.4/kernel/resource.c
59354 --- linux-2.6.39.4/kernel/resource.c 2011-05-19 00:06:34.000000000 -0400
59355 +++ linux-2.6.39.4/kernel/resource.c 2011-08-05 19:44:37.000000000 -0400
59356 @@ -133,8 +133,18 @@ static const struct file_operations proc
59357
59358 static int __init ioresources_init(void)
59359 {
59360 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59361 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59362 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59363 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59364 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59365 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59366 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59367 +#endif
59368 +#else
59369 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59370 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59371 +#endif
59372 return 0;
59373 }
59374 __initcall(ioresources_init);
59375 diff -urNp linux-2.6.39.4/kernel/rtmutex-tester.c linux-2.6.39.4/kernel/rtmutex-tester.c
59376 --- linux-2.6.39.4/kernel/rtmutex-tester.c 2011-05-19 00:06:34.000000000 -0400
59377 +++ linux-2.6.39.4/kernel/rtmutex-tester.c 2011-08-05 19:44:37.000000000 -0400
59378 @@ -20,7 +20,7 @@
59379 #define MAX_RT_TEST_MUTEXES 8
59380
59381 static spinlock_t rttest_lock;
59382 -static atomic_t rttest_event;
59383 +static atomic_unchecked_t rttest_event;
59384
59385 struct test_thread_data {
59386 int opcode;
59387 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59388
59389 case RTTEST_LOCKCONT:
59390 td->mutexes[td->opdata] = 1;
59391 - td->event = atomic_add_return(1, &rttest_event);
59392 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59393 return 0;
59394
59395 case RTTEST_RESET:
59396 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59397 return 0;
59398
59399 case RTTEST_RESETEVENT:
59400 - atomic_set(&rttest_event, 0);
59401 + atomic_set_unchecked(&rttest_event, 0);
59402 return 0;
59403
59404 default:
59405 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59406 return ret;
59407
59408 td->mutexes[id] = 1;
59409 - td->event = atomic_add_return(1, &rttest_event);
59410 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59411 rt_mutex_lock(&mutexes[id]);
59412 - td->event = atomic_add_return(1, &rttest_event);
59413 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59414 td->mutexes[id] = 4;
59415 return 0;
59416
59417 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59418 return ret;
59419
59420 td->mutexes[id] = 1;
59421 - td->event = atomic_add_return(1, &rttest_event);
59422 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59423 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59424 - td->event = atomic_add_return(1, &rttest_event);
59425 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59426 td->mutexes[id] = ret ? 0 : 4;
59427 return ret ? -EINTR : 0;
59428
59429 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59430 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59431 return ret;
59432
59433 - td->event = atomic_add_return(1, &rttest_event);
59434 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59435 rt_mutex_unlock(&mutexes[id]);
59436 - td->event = atomic_add_return(1, &rttest_event);
59437 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59438 td->mutexes[id] = 0;
59439 return 0;
59440
59441 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59442 break;
59443
59444 td->mutexes[dat] = 2;
59445 - td->event = atomic_add_return(1, &rttest_event);
59446 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59447 break;
59448
59449 default:
59450 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59451 return;
59452
59453 td->mutexes[dat] = 3;
59454 - td->event = atomic_add_return(1, &rttest_event);
59455 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59456 break;
59457
59458 case RTTEST_LOCKNOWAIT:
59459 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59460 return;
59461
59462 td->mutexes[dat] = 1;
59463 - td->event = atomic_add_return(1, &rttest_event);
59464 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59465 return;
59466
59467 default:
59468 diff -urNp linux-2.6.39.4/kernel/sched_autogroup.c linux-2.6.39.4/kernel/sched_autogroup.c
59469 --- linux-2.6.39.4/kernel/sched_autogroup.c 2011-05-19 00:06:34.000000000 -0400
59470 +++ linux-2.6.39.4/kernel/sched_autogroup.c 2011-08-05 19:44:37.000000000 -0400
59471 @@ -7,7 +7,7 @@
59472
59473 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59474 static struct autogroup autogroup_default;
59475 -static atomic_t autogroup_seq_nr;
59476 +static atomic_unchecked_t autogroup_seq_nr;
59477
59478 static void __init autogroup_init(struct task_struct *init_task)
59479 {
59480 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59481
59482 kref_init(&ag->kref);
59483 init_rwsem(&ag->lock);
59484 - ag->id = atomic_inc_return(&autogroup_seq_nr);
59485 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59486 ag->tg = tg;
59487 #ifdef CONFIG_RT_GROUP_SCHED
59488 /*
59489 diff -urNp linux-2.6.39.4/kernel/sched.c linux-2.6.39.4/kernel/sched.c
59490 --- linux-2.6.39.4/kernel/sched.c 2011-05-19 00:06:34.000000000 -0400
59491 +++ linux-2.6.39.4/kernel/sched.c 2011-08-05 19:44:37.000000000 -0400
59492 @@ -4078,6 +4078,8 @@ asmlinkage void __sched schedule(void)
59493 struct rq *rq;
59494 int cpu;
59495
59496 + pax_track_stack();
59497 +
59498 need_resched:
59499 preempt_disable();
59500 cpu = smp_processor_id();
59501 @@ -4165,7 +4167,7 @@ EXPORT_SYMBOL(schedule);
59502 * Look out! "owner" is an entirely speculative pointer
59503 * access and not reliable.
59504 */
59505 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
59506 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
59507 {
59508 unsigned int cpu;
59509 struct rq *rq;
59510 @@ -4179,10 +4181,10 @@ int mutex_spin_on_owner(struct mutex *lo
59511 * DEBUG_PAGEALLOC could have unmapped it if
59512 * the mutex owner just released it and exited.
59513 */
59514 - if (probe_kernel_address(&owner->cpu, cpu))
59515 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
59516 return 0;
59517 #else
59518 - cpu = owner->cpu;
59519 + cpu = task_thread_info(owner)->cpu;
59520 #endif
59521
59522 /*
59523 @@ -4219,7 +4221,7 @@ int mutex_spin_on_owner(struct mutex *lo
59524 /*
59525 * Is that owner really running on that cpu?
59526 */
59527 - if (task_thread_info(rq->curr) != owner || need_resched())
59528 + if (rq->curr != owner || need_resched())
59529 return 0;
59530
59531 arch_mutex_cpu_relax();
59532 @@ -4778,6 +4780,8 @@ int can_nice(const struct task_struct *p
59533 /* convert nice value [19,-20] to rlimit style value [1,40] */
59534 int nice_rlim = 20 - nice;
59535
59536 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59537 +
59538 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59539 capable(CAP_SYS_NICE));
59540 }
59541 @@ -4811,7 +4815,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59542 if (nice > 19)
59543 nice = 19;
59544
59545 - if (increment < 0 && !can_nice(current, nice))
59546 + if (increment < 0 && (!can_nice(current, nice) ||
59547 + gr_handle_chroot_nice()))
59548 return -EPERM;
59549
59550 retval = security_task_setnice(current, nice);
59551 @@ -4957,6 +4962,7 @@ recheck:
59552 unsigned long rlim_rtprio =
59553 task_rlimit(p, RLIMIT_RTPRIO);
59554
59555 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59556 /* can't set/change the rt policy */
59557 if (policy != p->policy && !rlim_rtprio)
59558 return -EPERM;
59559 @@ -7164,7 +7170,7 @@ static void init_sched_groups_power(int
59560 long power;
59561 int weight;
59562
59563 - WARN_ON(!sd || !sd->groups);
59564 + BUG_ON(!sd || !sd->groups);
59565
59566 if (cpu != group_first_cpu(sd->groups))
59567 return;
59568 diff -urNp linux-2.6.39.4/kernel/sched_fair.c linux-2.6.39.4/kernel/sched_fair.c
59569 --- linux-2.6.39.4/kernel/sched_fair.c 2011-05-19 00:06:34.000000000 -0400
59570 +++ linux-2.6.39.4/kernel/sched_fair.c 2011-08-05 19:44:37.000000000 -0400
59571 @@ -3999,7 +3999,7 @@ static void nohz_idle_balance(int this_c
59572 * run_rebalance_domains is triggered when needed from the scheduler tick.
59573 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59574 */
59575 -static void run_rebalance_domains(struct softirq_action *h)
59576 +static void run_rebalance_domains(void)
59577 {
59578 int this_cpu = smp_processor_id();
59579 struct rq *this_rq = cpu_rq(this_cpu);
59580 diff -urNp linux-2.6.39.4/kernel/signal.c linux-2.6.39.4/kernel/signal.c
59581 --- linux-2.6.39.4/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
59582 +++ linux-2.6.39.4/kernel/signal.c 2011-08-16 21:16:33.000000000 -0400
59583 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59584
59585 int print_fatal_signals __read_mostly;
59586
59587 -static void __user *sig_handler(struct task_struct *t, int sig)
59588 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
59589 {
59590 return t->sighand->action[sig - 1].sa.sa_handler;
59591 }
59592
59593 -static int sig_handler_ignored(void __user *handler, int sig)
59594 +static int sig_handler_ignored(__sighandler_t handler, int sig)
59595 {
59596 /* Is it explicitly or implicitly ignored? */
59597 return handler == SIG_IGN ||
59598 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59599 static int sig_task_ignored(struct task_struct *t, int sig,
59600 int from_ancestor_ns)
59601 {
59602 - void __user *handler;
59603 + __sighandler_t handler;
59604
59605 handler = sig_handler(t, sig);
59606
59607 @@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
59608 atomic_inc(&user->sigpending);
59609 rcu_read_unlock();
59610
59611 + if (!override_rlimit)
59612 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59613 +
59614 if (override_rlimit ||
59615 atomic_read(&user->sigpending) <=
59616 task_rlimit(t, RLIMIT_SIGPENDING)) {
59617 @@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
59618
59619 int unhandled_signal(struct task_struct *tsk, int sig)
59620 {
59621 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59622 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59623 if (is_global_init(tsk))
59624 return 1;
59625 if (handler != SIG_IGN && handler != SIG_DFL)
59626 @@ -693,6 +696,13 @@ static int check_kill_permission(int sig
59627 }
59628 }
59629
59630 + /* allow glibc communication via tgkill to other threads in our
59631 + thread group */
59632 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59633 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59634 + && gr_handle_signal(t, sig))
59635 + return -EPERM;
59636 +
59637 return security_task_kill(t, info, sig, 0);
59638 }
59639
59640 @@ -1041,7 +1051,7 @@ __group_send_sig_info(int sig, struct si
59641 return send_signal(sig, info, p, 1);
59642 }
59643
59644 -static int
59645 +int
59646 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59647 {
59648 return send_signal(sig, info, t, 0);
59649 @@ -1078,6 +1088,7 @@ force_sig_info(int sig, struct siginfo *
59650 unsigned long int flags;
59651 int ret, blocked, ignored;
59652 struct k_sigaction *action;
59653 + int is_unhandled = 0;
59654
59655 spin_lock_irqsave(&t->sighand->siglock, flags);
59656 action = &t->sighand->action[sig-1];
59657 @@ -1092,9 +1103,18 @@ force_sig_info(int sig, struct siginfo *
59658 }
59659 if (action->sa.sa_handler == SIG_DFL)
59660 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59661 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59662 + is_unhandled = 1;
59663 ret = specific_send_sig_info(sig, info, t);
59664 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59665
59666 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59667 + normal operation */
59668 + if (is_unhandled) {
59669 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59670 + gr_handle_crash(t, sig);
59671 + }
59672 +
59673 return ret;
59674 }
59675
59676 @@ -1153,8 +1173,11 @@ int group_send_sig_info(int sig, struct
59677 ret = check_kill_permission(sig, info, p);
59678 rcu_read_unlock();
59679
59680 - if (!ret && sig)
59681 + if (!ret && sig) {
59682 ret = do_send_sig_info(sig, info, p, true);
59683 + if (!ret)
59684 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59685 + }
59686
59687 return ret;
59688 }
59689 @@ -1718,6 +1741,8 @@ void ptrace_notify(int exit_code)
59690 {
59691 siginfo_t info;
59692
59693 + pax_track_stack();
59694 +
59695 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59696
59697 memset(&info, 0, sizeof info);
59698 @@ -2393,7 +2418,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59699 int error = -ESRCH;
59700
59701 rcu_read_lock();
59702 - p = find_task_by_vpid(pid);
59703 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59704 + /* allow glibc communication via tgkill to other threads in our
59705 + thread group */
59706 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59707 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59708 + p = find_task_by_vpid_unrestricted(pid);
59709 + else
59710 +#endif
59711 + p = find_task_by_vpid(pid);
59712 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59713 error = check_kill_permission(sig, info, p);
59714 /*
59715 diff -urNp linux-2.6.39.4/kernel/smp.c linux-2.6.39.4/kernel/smp.c
59716 --- linux-2.6.39.4/kernel/smp.c 2011-05-19 00:06:34.000000000 -0400
59717 +++ linux-2.6.39.4/kernel/smp.c 2011-08-05 19:44:37.000000000 -0400
59718 @@ -583,22 +583,22 @@ int smp_call_function(smp_call_func_t fu
59719 }
59720 EXPORT_SYMBOL(smp_call_function);
59721
59722 -void ipi_call_lock(void)
59723 +void ipi_call_lock(void) __acquires(call_function.lock)
59724 {
59725 raw_spin_lock(&call_function.lock);
59726 }
59727
59728 -void ipi_call_unlock(void)
59729 +void ipi_call_unlock(void) __releases(call_function.lock)
59730 {
59731 raw_spin_unlock(&call_function.lock);
59732 }
59733
59734 -void ipi_call_lock_irq(void)
59735 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59736 {
59737 raw_spin_lock_irq(&call_function.lock);
59738 }
59739
59740 -void ipi_call_unlock_irq(void)
59741 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59742 {
59743 raw_spin_unlock_irq(&call_function.lock);
59744 }
59745 diff -urNp linux-2.6.39.4/kernel/softirq.c linux-2.6.39.4/kernel/softirq.c
59746 --- linux-2.6.39.4/kernel/softirq.c 2011-05-19 00:06:34.000000000 -0400
59747 +++ linux-2.6.39.4/kernel/softirq.c 2011-08-05 20:34:06.000000000 -0400
59748 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59749
59750 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59751
59752 -char *softirq_to_name[NR_SOFTIRQS] = {
59753 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59754 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59755 "TASKLET", "SCHED", "HRTIMER", "RCU"
59756 };
59757 @@ -235,7 +235,7 @@ restart:
59758 kstat_incr_softirqs_this_cpu(vec_nr);
59759
59760 trace_softirq_entry(vec_nr);
59761 - h->action(h);
59762 + h->action();
59763 trace_softirq_exit(vec_nr);
59764 if (unlikely(prev_count != preempt_count())) {
59765 printk(KERN_ERR "huh, entered softirq %u %s %p"
59766 @@ -377,9 +377,11 @@ void raise_softirq(unsigned int nr)
59767 local_irq_restore(flags);
59768 }
59769
59770 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59771 +void open_softirq(int nr, void (*action)(void))
59772 {
59773 - softirq_vec[nr].action = action;
59774 + pax_open_kernel();
59775 + *(void **)&softirq_vec[nr].action = action;
59776 + pax_close_kernel();
59777 }
59778
59779 /*
59780 @@ -433,7 +435,7 @@ void __tasklet_hi_schedule_first(struct
59781
59782 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59783
59784 -static void tasklet_action(struct softirq_action *a)
59785 +static void tasklet_action(void)
59786 {
59787 struct tasklet_struct *list;
59788
59789 @@ -468,7 +470,7 @@ static void tasklet_action(struct softir
59790 }
59791 }
59792
59793 -static void tasklet_hi_action(struct softirq_action *a)
59794 +static void tasklet_hi_action(void)
59795 {
59796 struct tasklet_struct *list;
59797
59798 diff -urNp linux-2.6.39.4/kernel/sys.c linux-2.6.39.4/kernel/sys.c
59799 --- linux-2.6.39.4/kernel/sys.c 2011-05-19 00:06:34.000000000 -0400
59800 +++ linux-2.6.39.4/kernel/sys.c 2011-08-05 19:44:37.000000000 -0400
59801 @@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59802 error = -EACCES;
59803 goto out;
59804 }
59805 +
59806 + if (gr_handle_chroot_setpriority(p, niceval)) {
59807 + error = -EACCES;
59808 + goto out;
59809 + }
59810 +
59811 no_nice = security_task_setnice(p, niceval);
59812 if (no_nice) {
59813 error = no_nice;
59814 @@ -538,6 +544,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59815 goto error;
59816 }
59817
59818 + if (gr_check_group_change(new->gid, new->egid, -1))
59819 + goto error;
59820 +
59821 if (rgid != (gid_t) -1 ||
59822 (egid != (gid_t) -1 && egid != old->gid))
59823 new->sgid = new->egid;
59824 @@ -567,6 +576,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59825 old = current_cred();
59826
59827 retval = -EPERM;
59828 +
59829 + if (gr_check_group_change(gid, gid, gid))
59830 + goto error;
59831 +
59832 if (nsown_capable(CAP_SETGID))
59833 new->gid = new->egid = new->sgid = new->fsgid = gid;
59834 else if (gid == old->gid || gid == old->sgid)
59835 @@ -647,6 +660,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59836 goto error;
59837 }
59838
59839 + if (gr_check_user_change(new->uid, new->euid, -1))
59840 + goto error;
59841 +
59842 if (new->uid != old->uid) {
59843 retval = set_user(new);
59844 if (retval < 0)
59845 @@ -691,6 +707,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59846 old = current_cred();
59847
59848 retval = -EPERM;
59849 +
59850 + if (gr_check_crash_uid(uid))
59851 + goto error;
59852 + if (gr_check_user_change(uid, uid, uid))
59853 + goto error;
59854 +
59855 if (nsown_capable(CAP_SETUID)) {
59856 new->suid = new->uid = uid;
59857 if (uid != old->uid) {
59858 @@ -745,6 +767,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59859 goto error;
59860 }
59861
59862 + if (gr_check_user_change(ruid, euid, -1))
59863 + goto error;
59864 +
59865 if (ruid != (uid_t) -1) {
59866 new->uid = ruid;
59867 if (ruid != old->uid) {
59868 @@ -809,6 +834,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59869 goto error;
59870 }
59871
59872 + if (gr_check_group_change(rgid, egid, -1))
59873 + goto error;
59874 +
59875 if (rgid != (gid_t) -1)
59876 new->gid = rgid;
59877 if (egid != (gid_t) -1)
59878 @@ -855,6 +883,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59879 old = current_cred();
59880 old_fsuid = old->fsuid;
59881
59882 + if (gr_check_user_change(-1, -1, uid))
59883 + goto error;
59884 +
59885 if (uid == old->uid || uid == old->euid ||
59886 uid == old->suid || uid == old->fsuid ||
59887 nsown_capable(CAP_SETUID)) {
59888 @@ -865,6 +896,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59889 }
59890 }
59891
59892 +error:
59893 abort_creds(new);
59894 return old_fsuid;
59895
59896 @@ -891,12 +923,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59897 if (gid == old->gid || gid == old->egid ||
59898 gid == old->sgid || gid == old->fsgid ||
59899 nsown_capable(CAP_SETGID)) {
59900 + if (gr_check_group_change(-1, -1, gid))
59901 + goto error;
59902 +
59903 if (gid != old_fsgid) {
59904 new->fsgid = gid;
59905 goto change_okay;
59906 }
59907 }
59908
59909 +error:
59910 abort_creds(new);
59911 return old_fsgid;
59912
59913 @@ -1643,7 +1679,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59914 error = get_dumpable(me->mm);
59915 break;
59916 case PR_SET_DUMPABLE:
59917 - if (arg2 < 0 || arg2 > 1) {
59918 + if (arg2 > 1) {
59919 error = -EINVAL;
59920 break;
59921 }
59922 diff -urNp linux-2.6.39.4/kernel/sysctl.c linux-2.6.39.4/kernel/sysctl.c
59923 --- linux-2.6.39.4/kernel/sysctl.c 2011-05-19 00:06:34.000000000 -0400
59924 +++ linux-2.6.39.4/kernel/sysctl.c 2011-08-05 19:44:37.000000000 -0400
59925 @@ -84,6 +84,13 @@
59926
59927
59928 #if defined(CONFIG_SYSCTL)
59929 +#include <linux/grsecurity.h>
59930 +#include <linux/grinternal.h>
59931 +
59932 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59933 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59934 + const int op);
59935 +extern int gr_handle_chroot_sysctl(const int op);
59936
59937 /* External variables not in a header file. */
59938 extern int sysctl_overcommit_memory;
59939 @@ -196,6 +203,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59940 }
59941
59942 #endif
59943 +extern struct ctl_table grsecurity_table[];
59944
59945 static struct ctl_table root_table[];
59946 static struct ctl_table_root sysctl_table_root;
59947 @@ -225,6 +233,20 @@ extern struct ctl_table epoll_table[];
59948 int sysctl_legacy_va_layout;
59949 #endif
59950
59951 +#ifdef CONFIG_PAX_SOFTMODE
59952 +static ctl_table pax_table[] = {
59953 + {
59954 + .procname = "softmode",
59955 + .data = &pax_softmode,
59956 + .maxlen = sizeof(unsigned int),
59957 + .mode = 0600,
59958 + .proc_handler = &proc_dointvec,
59959 + },
59960 +
59961 + { }
59962 +};
59963 +#endif
59964 +
59965 /* The default sysctl tables: */
59966
59967 static struct ctl_table root_table[] = {
59968 @@ -271,6 +293,22 @@ static int max_extfrag_threshold = 1000;
59969 #endif
59970
59971 static struct ctl_table kern_table[] = {
59972 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59973 + {
59974 + .procname = "grsecurity",
59975 + .mode = 0500,
59976 + .child = grsecurity_table,
59977 + },
59978 +#endif
59979 +
59980 +#ifdef CONFIG_PAX_SOFTMODE
59981 + {
59982 + .procname = "pax",
59983 + .mode = 0500,
59984 + .child = pax_table,
59985 + },
59986 +#endif
59987 +
59988 {
59989 .procname = "sched_child_runs_first",
59990 .data = &sysctl_sched_child_runs_first,
59991 @@ -545,7 +583,7 @@ static struct ctl_table kern_table[] = {
59992 .data = &modprobe_path,
59993 .maxlen = KMOD_PATH_LEN,
59994 .mode = 0644,
59995 - .proc_handler = proc_dostring,
59996 + .proc_handler = proc_dostring_modpriv,
59997 },
59998 {
59999 .procname = "modules_disabled",
60000 @@ -707,16 +745,20 @@ static struct ctl_table kern_table[] = {
60001 .extra1 = &zero,
60002 .extra2 = &one,
60003 },
60004 +#endif
60005 {
60006 .procname = "kptr_restrict",
60007 .data = &kptr_restrict,
60008 .maxlen = sizeof(int),
60009 .mode = 0644,
60010 .proc_handler = proc_dmesg_restrict,
60011 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60012 + .extra1 = &two,
60013 +#else
60014 .extra1 = &zero,
60015 +#endif
60016 .extra2 = &two,
60017 },
60018 -#endif
60019 {
60020 .procname = "ngroups_max",
60021 .data = &ngroups_max,
60022 @@ -1189,6 +1231,13 @@ static struct ctl_table vm_table[] = {
60023 .proc_handler = proc_dointvec_minmax,
60024 .extra1 = &zero,
60025 },
60026 + {
60027 + .procname = "heap_stack_gap",
60028 + .data = &sysctl_heap_stack_gap,
60029 + .maxlen = sizeof(sysctl_heap_stack_gap),
60030 + .mode = 0644,
60031 + .proc_handler = proc_doulongvec_minmax,
60032 + },
60033 #else
60034 {
60035 .procname = "nr_trim_pages",
60036 @@ -1698,6 +1747,17 @@ static int test_perm(int mode, int op)
60037 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
60038 {
60039 int mode;
60040 + int error;
60041 +
60042 + if (table->parent != NULL && table->parent->procname != NULL &&
60043 + table->procname != NULL &&
60044 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
60045 + return -EACCES;
60046 + if (gr_handle_chroot_sysctl(op))
60047 + return -EACCES;
60048 + error = gr_handle_sysctl(table, op);
60049 + if (error)
60050 + return error;
60051
60052 if (root->permissions)
60053 mode = root->permissions(root, current->nsproxy, table);
60054 @@ -2102,6 +2162,16 @@ int proc_dostring(struct ctl_table *tabl
60055 buffer, lenp, ppos);
60056 }
60057
60058 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60059 + void __user *buffer, size_t *lenp, loff_t *ppos)
60060 +{
60061 + if (write && !capable(CAP_SYS_MODULE))
60062 + return -EPERM;
60063 +
60064 + return _proc_do_string(table->data, table->maxlen, write,
60065 + buffer, lenp, ppos);
60066 +}
60067 +
60068 static size_t proc_skip_spaces(char **buf)
60069 {
60070 size_t ret;
60071 @@ -2207,6 +2277,8 @@ static int proc_put_long(void __user **b
60072 len = strlen(tmp);
60073 if (len > *size)
60074 len = *size;
60075 + if (len > sizeof(tmp))
60076 + len = sizeof(tmp);
60077 if (copy_to_user(*buf, tmp, len))
60078 return -EFAULT;
60079 *size -= len;
60080 @@ -2523,8 +2595,11 @@ static int __do_proc_doulongvec_minmax(v
60081 *i = val;
60082 } else {
60083 val = convdiv * (*i) / convmul;
60084 - if (!first)
60085 + if (!first) {
60086 err = proc_put_char(&buffer, &left, '\t');
60087 + if (err)
60088 + break;
60089 + }
60090 err = proc_put_long(&buffer, &left, val, false);
60091 if (err)
60092 break;
60093 @@ -2919,6 +2994,12 @@ int proc_dostring(struct ctl_table *tabl
60094 return -ENOSYS;
60095 }
60096
60097 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60098 + void __user *buffer, size_t *lenp, loff_t *ppos)
60099 +{
60100 + return -ENOSYS;
60101 +}
60102 +
60103 int proc_dointvec(struct ctl_table *table, int write,
60104 void __user *buffer, size_t *lenp, loff_t *ppos)
60105 {
60106 @@ -2975,6 +3056,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
60107 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
60108 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
60109 EXPORT_SYMBOL(proc_dostring);
60110 +EXPORT_SYMBOL(proc_dostring_modpriv);
60111 EXPORT_SYMBOL(proc_doulongvec_minmax);
60112 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
60113 EXPORT_SYMBOL(register_sysctl_table);
60114 diff -urNp linux-2.6.39.4/kernel/sysctl_check.c linux-2.6.39.4/kernel/sysctl_check.c
60115 --- linux-2.6.39.4/kernel/sysctl_check.c 2011-05-19 00:06:34.000000000 -0400
60116 +++ linux-2.6.39.4/kernel/sysctl_check.c 2011-08-05 19:44:37.000000000 -0400
60117 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
60118 set_fail(&fail, table, "Directory with extra2");
60119 } else {
60120 if ((table->proc_handler == proc_dostring) ||
60121 + (table->proc_handler == proc_dostring_modpriv) ||
60122 (table->proc_handler == proc_dointvec) ||
60123 (table->proc_handler == proc_dointvec_minmax) ||
60124 (table->proc_handler == proc_dointvec_jiffies) ||
60125 diff -urNp linux-2.6.39.4/kernel/taskstats.c linux-2.6.39.4/kernel/taskstats.c
60126 --- linux-2.6.39.4/kernel/taskstats.c 2011-07-09 09:18:51.000000000 -0400
60127 +++ linux-2.6.39.4/kernel/taskstats.c 2011-08-05 19:44:37.000000000 -0400
60128 @@ -27,9 +27,12 @@
60129 #include <linux/cgroup.h>
60130 #include <linux/fs.h>
60131 #include <linux/file.h>
60132 +#include <linux/grsecurity.h>
60133 #include <net/genetlink.h>
60134 #include <asm/atomic.h>
60135
60136 +extern int gr_is_taskstats_denied(int pid);
60137 +
60138 /*
60139 * Maximum length of a cpumask that can be specified in
60140 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
60141 @@ -558,6 +561,9 @@ err:
60142
60143 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
60144 {
60145 + if (gr_is_taskstats_denied(current->pid))
60146 + return -EACCES;
60147 +
60148 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
60149 return cmd_attr_register_cpumask(info);
60150 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
60151 diff -urNp linux-2.6.39.4/kernel/time/tick-broadcast.c linux-2.6.39.4/kernel/time/tick-broadcast.c
60152 --- linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-05-19 00:06:34.000000000 -0400
60153 +++ linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-08-05 19:44:37.000000000 -0400
60154 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
60155 * then clear the broadcast bit.
60156 */
60157 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
60158 - int cpu = smp_processor_id();
60159 + cpu = smp_processor_id();
60160
60161 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
60162 tick_broadcast_clear_oneshot(cpu);
60163 diff -urNp linux-2.6.39.4/kernel/time/timekeeping.c linux-2.6.39.4/kernel/time/timekeeping.c
60164 --- linux-2.6.39.4/kernel/time/timekeeping.c 2011-05-19 00:06:34.000000000 -0400
60165 +++ linux-2.6.39.4/kernel/time/timekeeping.c 2011-08-05 19:44:37.000000000 -0400
60166 @@ -14,6 +14,7 @@
60167 #include <linux/init.h>
60168 #include <linux/mm.h>
60169 #include <linux/sched.h>
60170 +#include <linux/grsecurity.h>
60171 #include <linux/syscore_ops.h>
60172 #include <linux/clocksource.h>
60173 #include <linux/jiffies.h>
60174 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
60175 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
60176 return -EINVAL;
60177
60178 + gr_log_timechange();
60179 +
60180 write_seqlock_irqsave(&xtime_lock, flags);
60181
60182 timekeeping_forward_now();
60183 diff -urNp linux-2.6.39.4/kernel/time/timer_list.c linux-2.6.39.4/kernel/time/timer_list.c
60184 --- linux-2.6.39.4/kernel/time/timer_list.c 2011-05-19 00:06:34.000000000 -0400
60185 +++ linux-2.6.39.4/kernel/time/timer_list.c 2011-08-05 19:44:37.000000000 -0400
60186 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
60187
60188 static void print_name_offset(struct seq_file *m, void *sym)
60189 {
60190 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60191 + SEQ_printf(m, "<%p>", NULL);
60192 +#else
60193 char symname[KSYM_NAME_LEN];
60194
60195 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
60196 SEQ_printf(m, "<%pK>", sym);
60197 else
60198 SEQ_printf(m, "%s", symname);
60199 +#endif
60200 }
60201
60202 static void
60203 @@ -112,7 +116,11 @@ next_one:
60204 static void
60205 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
60206 {
60207 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60208 + SEQ_printf(m, " .base: %p\n", NULL);
60209 +#else
60210 SEQ_printf(m, " .base: %pK\n", base);
60211 +#endif
60212 SEQ_printf(m, " .index: %d\n",
60213 base->index);
60214 SEQ_printf(m, " .resolution: %Lu nsecs\n",
60215 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
60216 {
60217 struct proc_dir_entry *pe;
60218
60219 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60220 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
60221 +#else
60222 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
60223 +#endif
60224 if (!pe)
60225 return -ENOMEM;
60226 return 0;
60227 diff -urNp linux-2.6.39.4/kernel/time/timer_stats.c linux-2.6.39.4/kernel/time/timer_stats.c
60228 --- linux-2.6.39.4/kernel/time/timer_stats.c 2011-05-19 00:06:34.000000000 -0400
60229 +++ linux-2.6.39.4/kernel/time/timer_stats.c 2011-08-05 19:44:37.000000000 -0400
60230 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
60231 static unsigned long nr_entries;
60232 static struct entry entries[MAX_ENTRIES];
60233
60234 -static atomic_t overflow_count;
60235 +static atomic_unchecked_t overflow_count;
60236
60237 /*
60238 * The entries are in a hash-table, for fast lookup:
60239 @@ -140,7 +140,7 @@ static void reset_entries(void)
60240 nr_entries = 0;
60241 memset(entries, 0, sizeof(entries));
60242 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
60243 - atomic_set(&overflow_count, 0);
60244 + atomic_set_unchecked(&overflow_count, 0);
60245 }
60246
60247 static struct entry *alloc_entry(void)
60248 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
60249 if (likely(entry))
60250 entry->count++;
60251 else
60252 - atomic_inc(&overflow_count);
60253 + atomic_inc_unchecked(&overflow_count);
60254
60255 out_unlock:
60256 raw_spin_unlock_irqrestore(lock, flags);
60257 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
60258
60259 static void print_name_offset(struct seq_file *m, unsigned long addr)
60260 {
60261 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60262 + seq_printf(m, "<%p>", NULL);
60263 +#else
60264 char symname[KSYM_NAME_LEN];
60265
60266 if (lookup_symbol_name(addr, symname) < 0)
60267 seq_printf(m, "<%p>", (void *)addr);
60268 else
60269 seq_printf(m, "%s", symname);
60270 +#endif
60271 }
60272
60273 static int tstats_show(struct seq_file *m, void *v)
60274 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
60275
60276 seq_puts(m, "Timer Stats Version: v0.2\n");
60277 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
60278 - if (atomic_read(&overflow_count))
60279 + if (atomic_read_unchecked(&overflow_count))
60280 seq_printf(m, "Overflow: %d entries\n",
60281 - atomic_read(&overflow_count));
60282 + atomic_read_unchecked(&overflow_count));
60283
60284 for (i = 0; i < nr_entries; i++) {
60285 entry = entries + i;
60286 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
60287 {
60288 struct proc_dir_entry *pe;
60289
60290 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60291 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
60292 +#else
60293 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
60294 +#endif
60295 if (!pe)
60296 return -ENOMEM;
60297 return 0;
60298 diff -urNp linux-2.6.39.4/kernel/time.c linux-2.6.39.4/kernel/time.c
60299 --- linux-2.6.39.4/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
60300 +++ linux-2.6.39.4/kernel/time.c 2011-08-05 19:44:37.000000000 -0400
60301 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
60302 return error;
60303
60304 if (tz) {
60305 + /* we log in do_settimeofday called below, so don't log twice
60306 + */
60307 + if (!tv)
60308 + gr_log_timechange();
60309 +
60310 /* SMP safe, global irq locking makes it work. */
60311 sys_tz = *tz;
60312 update_vsyscall_tz();
60313 diff -urNp linux-2.6.39.4/kernel/timer.c linux-2.6.39.4/kernel/timer.c
60314 --- linux-2.6.39.4/kernel/timer.c 2011-05-19 00:06:34.000000000 -0400
60315 +++ linux-2.6.39.4/kernel/timer.c 2011-08-05 19:44:37.000000000 -0400
60316 @@ -1305,7 +1305,7 @@ void update_process_times(int user_tick)
60317 /*
60318 * This function runs timers and the timer-tq in bottom half context.
60319 */
60320 -static void run_timer_softirq(struct softirq_action *h)
60321 +static void run_timer_softirq(void)
60322 {
60323 struct tvec_base *base = __this_cpu_read(tvec_bases);
60324
60325 diff -urNp linux-2.6.39.4/kernel/trace/blktrace.c linux-2.6.39.4/kernel/trace/blktrace.c
60326 --- linux-2.6.39.4/kernel/trace/blktrace.c 2011-05-19 00:06:34.000000000 -0400
60327 +++ linux-2.6.39.4/kernel/trace/blktrace.c 2011-08-05 19:44:37.000000000 -0400
60328 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
60329 struct blk_trace *bt = filp->private_data;
60330 char buf[16];
60331
60332 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60333 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60334
60335 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60336 }
60337 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60338 return 1;
60339
60340 bt = buf->chan->private_data;
60341 - atomic_inc(&bt->dropped);
60342 + atomic_inc_unchecked(&bt->dropped);
60343 return 0;
60344 }
60345
60346 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60347
60348 bt->dir = dir;
60349 bt->dev = dev;
60350 - atomic_set(&bt->dropped, 0);
60351 + atomic_set_unchecked(&bt->dropped, 0);
60352
60353 ret = -EIO;
60354 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60355 diff -urNp linux-2.6.39.4/kernel/trace/ftrace.c linux-2.6.39.4/kernel/trace/ftrace.c
60356 --- linux-2.6.39.4/kernel/trace/ftrace.c 2011-06-03 00:04:14.000000000 -0400
60357 +++ linux-2.6.39.4/kernel/trace/ftrace.c 2011-08-05 20:34:06.000000000 -0400
60358 @@ -1107,13 +1107,18 @@ ftrace_code_disable(struct module *mod,
60359
60360 ip = rec->ip;
60361
60362 + ret = ftrace_arch_code_modify_prepare();
60363 + FTRACE_WARN_ON(ret);
60364 + if (ret)
60365 + return 0;
60366 +
60367 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60368 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60369 if (ret) {
60370 ftrace_bug(ret, ip);
60371 rec->flags |= FTRACE_FL_FAILED;
60372 - return 0;
60373 }
60374 - return 1;
60375 + return ret ? 0 : 1;
60376 }
60377
60378 /*
60379 @@ -2011,7 +2016,7 @@ static void ftrace_free_entry_rcu(struct
60380
60381 int
60382 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60383 - void *data)
60384 + void *data)
60385 {
60386 struct ftrace_func_probe *entry;
60387 struct ftrace_page *pg;
60388 diff -urNp linux-2.6.39.4/kernel/trace/trace.c linux-2.6.39.4/kernel/trace/trace.c
60389 --- linux-2.6.39.4/kernel/trace/trace.c 2011-05-19 00:06:34.000000000 -0400
60390 +++ linux-2.6.39.4/kernel/trace/trace.c 2011-08-05 19:44:37.000000000 -0400
60391 @@ -3330,6 +3330,8 @@ static ssize_t tracing_splice_read_pipe(
60392 size_t rem;
60393 unsigned int i;
60394
60395 + pax_track_stack();
60396 +
60397 if (splice_grow_spd(pipe, &spd))
60398 return -ENOMEM;
60399
60400 @@ -3813,6 +3815,8 @@ tracing_buffers_splice_read(struct file
60401 int entries, size, i;
60402 size_t ret;
60403
60404 + pax_track_stack();
60405 +
60406 if (splice_grow_spd(pipe, &spd))
60407 return -ENOMEM;
60408
60409 @@ -3981,10 +3985,9 @@ static const struct file_operations trac
60410 };
60411 #endif
60412
60413 -static struct dentry *d_tracer;
60414 -
60415 struct dentry *tracing_init_dentry(void)
60416 {
60417 + static struct dentry *d_tracer;
60418 static int once;
60419
60420 if (d_tracer)
60421 @@ -4004,10 +4007,9 @@ struct dentry *tracing_init_dentry(void)
60422 return d_tracer;
60423 }
60424
60425 -static struct dentry *d_percpu;
60426 -
60427 struct dentry *tracing_dentry_percpu(void)
60428 {
60429 + static struct dentry *d_percpu;
60430 static int once;
60431 struct dentry *d_tracer;
60432
60433 diff -urNp linux-2.6.39.4/kernel/trace/trace_events.c linux-2.6.39.4/kernel/trace/trace_events.c
60434 --- linux-2.6.39.4/kernel/trace/trace_events.c 2011-05-19 00:06:34.000000000 -0400
60435 +++ linux-2.6.39.4/kernel/trace/trace_events.c 2011-08-05 20:34:06.000000000 -0400
60436 @@ -1241,10 +1241,6 @@ static LIST_HEAD(ftrace_module_file_list
60437 struct ftrace_module_file_ops {
60438 struct list_head list;
60439 struct module *mod;
60440 - struct file_operations id;
60441 - struct file_operations enable;
60442 - struct file_operations format;
60443 - struct file_operations filter;
60444 };
60445
60446 static struct ftrace_module_file_ops *
60447 @@ -1265,17 +1261,12 @@ trace_create_file_ops(struct module *mod
60448
60449 file_ops->mod = mod;
60450
60451 - file_ops->id = ftrace_event_id_fops;
60452 - file_ops->id.owner = mod;
60453 -
60454 - file_ops->enable = ftrace_enable_fops;
60455 - file_ops->enable.owner = mod;
60456 -
60457 - file_ops->filter = ftrace_event_filter_fops;
60458 - file_ops->filter.owner = mod;
60459 -
60460 - file_ops->format = ftrace_event_format_fops;
60461 - file_ops->format.owner = mod;
60462 + pax_open_kernel();
60463 + *(void **)&mod->trace_id.owner = mod;
60464 + *(void **)&mod->trace_enable.owner = mod;
60465 + *(void **)&mod->trace_filter.owner = mod;
60466 + *(void **)&mod->trace_format.owner = mod;
60467 + pax_close_kernel();
60468
60469 list_add(&file_ops->list, &ftrace_module_file_list);
60470
60471 @@ -1299,8 +1290,8 @@ static void trace_module_add_events(stru
60472
60473 for_each_event(call, start, end) {
60474 __trace_add_event_call(*call, mod,
60475 - &file_ops->id, &file_ops->enable,
60476 - &file_ops->filter, &file_ops->format);
60477 + &mod->trace_id, &mod->trace_enable,
60478 + &mod->trace_filter, &mod->trace_format);
60479 }
60480 }
60481
60482 diff -urNp linux-2.6.39.4/kernel/trace/trace_mmiotrace.c linux-2.6.39.4/kernel/trace/trace_mmiotrace.c
60483 --- linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-05-19 00:06:34.000000000 -0400
60484 +++ linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-08-05 19:44:37.000000000 -0400
60485 @@ -24,7 +24,7 @@ struct header_iter {
60486 static struct trace_array *mmio_trace_array;
60487 static bool overrun_detected;
60488 static unsigned long prev_overruns;
60489 -static atomic_t dropped_count;
60490 +static atomic_unchecked_t dropped_count;
60491
60492 static void mmio_reset_data(struct trace_array *tr)
60493 {
60494 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60495
60496 static unsigned long count_overruns(struct trace_iterator *iter)
60497 {
60498 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
60499 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60500 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60501
60502 if (over > prev_overruns)
60503 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60504 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60505 sizeof(*entry), 0, pc);
60506 if (!event) {
60507 - atomic_inc(&dropped_count);
60508 + atomic_inc_unchecked(&dropped_count);
60509 return;
60510 }
60511 entry = ring_buffer_event_data(event);
60512 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60513 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60514 sizeof(*entry), 0, pc);
60515 if (!event) {
60516 - atomic_inc(&dropped_count);
60517 + atomic_inc_unchecked(&dropped_count);
60518 return;
60519 }
60520 entry = ring_buffer_event_data(event);
60521 diff -urNp linux-2.6.39.4/kernel/trace/trace_output.c linux-2.6.39.4/kernel/trace/trace_output.c
60522 --- linux-2.6.39.4/kernel/trace/trace_output.c 2011-05-19 00:06:34.000000000 -0400
60523 +++ linux-2.6.39.4/kernel/trace/trace_output.c 2011-08-05 19:44:37.000000000 -0400
60524 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60525
60526 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60527 if (!IS_ERR(p)) {
60528 - p = mangle_path(s->buffer + s->len, p, "\n");
60529 + p = mangle_path(s->buffer + s->len, p, "\n\\");
60530 if (p) {
60531 s->len = p - s->buffer;
60532 return 1;
60533 diff -urNp linux-2.6.39.4/kernel/trace/trace_stack.c linux-2.6.39.4/kernel/trace/trace_stack.c
60534 --- linux-2.6.39.4/kernel/trace/trace_stack.c 2011-05-19 00:06:34.000000000 -0400
60535 +++ linux-2.6.39.4/kernel/trace/trace_stack.c 2011-08-05 19:44:37.000000000 -0400
60536 @@ -50,7 +50,7 @@ static inline void check_stack(void)
60537 return;
60538
60539 /* we do not handle interrupt stacks yet */
60540 - if (!object_is_on_stack(&this_size))
60541 + if (!object_starts_on_stack(&this_size))
60542 return;
60543
60544 local_irq_save(flags);
60545 diff -urNp linux-2.6.39.4/kernel/trace/trace_workqueue.c linux-2.6.39.4/kernel/trace/trace_workqueue.c
60546 --- linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-05-19 00:06:34.000000000 -0400
60547 +++ linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-08-05 19:44:37.000000000 -0400
60548 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60549 int cpu;
60550 pid_t pid;
60551 /* Can be inserted from interrupt or user context, need to be atomic */
60552 - atomic_t inserted;
60553 + atomic_unchecked_t inserted;
60554 /*
60555 * Don't need to be atomic, works are serialized in a single workqueue thread
60556 * on a single CPU.
60557 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60558 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60559 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60560 if (node->pid == wq_thread->pid) {
60561 - atomic_inc(&node->inserted);
60562 + atomic_inc_unchecked(&node->inserted);
60563 goto found;
60564 }
60565 }
60566 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60567 tsk = get_pid_task(pid, PIDTYPE_PID);
60568 if (tsk) {
60569 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60570 - atomic_read(&cws->inserted), cws->executed,
60571 + atomic_read_unchecked(&cws->inserted), cws->executed,
60572 tsk->comm);
60573 put_task_struct(tsk);
60574 }
60575 diff -urNp linux-2.6.39.4/lib/bug.c linux-2.6.39.4/lib/bug.c
60576 --- linux-2.6.39.4/lib/bug.c 2011-05-19 00:06:34.000000000 -0400
60577 +++ linux-2.6.39.4/lib/bug.c 2011-08-05 19:44:37.000000000 -0400
60578 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60579 return BUG_TRAP_TYPE_NONE;
60580
60581 bug = find_bug(bugaddr);
60582 + if (!bug)
60583 + return BUG_TRAP_TYPE_NONE;
60584
60585 file = NULL;
60586 line = 0;
60587 diff -urNp linux-2.6.39.4/lib/debugobjects.c linux-2.6.39.4/lib/debugobjects.c
60588 --- linux-2.6.39.4/lib/debugobjects.c 2011-07-09 09:18:51.000000000 -0400
60589 +++ linux-2.6.39.4/lib/debugobjects.c 2011-08-05 19:44:37.000000000 -0400
60590 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60591 if (limit > 4)
60592 return;
60593
60594 - is_on_stack = object_is_on_stack(addr);
60595 + is_on_stack = object_starts_on_stack(addr);
60596 if (is_on_stack == onstack)
60597 return;
60598
60599 diff -urNp linux-2.6.39.4/lib/dma-debug.c linux-2.6.39.4/lib/dma-debug.c
60600 --- linux-2.6.39.4/lib/dma-debug.c 2011-05-19 00:06:34.000000000 -0400
60601 +++ linux-2.6.39.4/lib/dma-debug.c 2011-08-05 19:44:37.000000000 -0400
60602 @@ -862,7 +862,7 @@ out:
60603
60604 static void check_for_stack(struct device *dev, void *addr)
60605 {
60606 - if (object_is_on_stack(addr))
60607 + if (object_starts_on_stack(addr))
60608 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60609 "stack [addr=%p]\n", addr);
60610 }
60611 diff -urNp linux-2.6.39.4/lib/inflate.c linux-2.6.39.4/lib/inflate.c
60612 --- linux-2.6.39.4/lib/inflate.c 2011-05-19 00:06:34.000000000 -0400
60613 +++ linux-2.6.39.4/lib/inflate.c 2011-08-05 19:44:37.000000000 -0400
60614 @@ -269,7 +269,7 @@ static void free(void *where)
60615 malloc_ptr = free_mem_ptr;
60616 }
60617 #else
60618 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60619 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60620 #define free(a) kfree(a)
60621 #endif
60622
60623 diff -urNp linux-2.6.39.4/lib/Kconfig.debug linux-2.6.39.4/lib/Kconfig.debug
60624 --- linux-2.6.39.4/lib/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
60625 +++ linux-2.6.39.4/lib/Kconfig.debug 2011-08-05 19:44:37.000000000 -0400
60626 @@ -1078,6 +1078,7 @@ config LATENCYTOP
60627 depends on DEBUG_KERNEL
60628 depends on STACKTRACE_SUPPORT
60629 depends on PROC_FS
60630 + depends on !GRKERNSEC_HIDESYM
60631 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60632 select KALLSYMS
60633 select KALLSYMS_ALL
60634 diff -urNp linux-2.6.39.4/lib/kref.c linux-2.6.39.4/lib/kref.c
60635 --- linux-2.6.39.4/lib/kref.c 2011-05-19 00:06:34.000000000 -0400
60636 +++ linux-2.6.39.4/lib/kref.c 2011-08-05 19:44:37.000000000 -0400
60637 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60638 */
60639 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60640 {
60641 - WARN_ON(release == NULL);
60642 + BUG_ON(release == NULL);
60643 WARN_ON(release == (void (*)(struct kref *))kfree);
60644
60645 if (atomic_dec_and_test(&kref->refcount)) {
60646 diff -urNp linux-2.6.39.4/lib/radix-tree.c linux-2.6.39.4/lib/radix-tree.c
60647 --- linux-2.6.39.4/lib/radix-tree.c 2011-05-19 00:06:34.000000000 -0400
60648 +++ linux-2.6.39.4/lib/radix-tree.c 2011-08-05 19:44:37.000000000 -0400
60649 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60650 int nr;
60651 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60652 };
60653 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60654 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60655
60656 static inline void *ptr_to_indirect(void *ptr)
60657 {
60658 diff -urNp linux-2.6.39.4/lib/vsprintf.c linux-2.6.39.4/lib/vsprintf.c
60659 --- linux-2.6.39.4/lib/vsprintf.c 2011-05-19 00:06:34.000000000 -0400
60660 +++ linux-2.6.39.4/lib/vsprintf.c 2011-08-05 19:44:37.000000000 -0400
60661 @@ -16,6 +16,9 @@
60662 * - scnprintf and vscnprintf
60663 */
60664
60665 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60666 +#define __INCLUDED_BY_HIDESYM 1
60667 +#endif
60668 #include <stdarg.h>
60669 #include <linux/module.h>
60670 #include <linux/types.h>
60671 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60672 char sym[KSYM_SYMBOL_LEN];
60673 if (ext == 'B')
60674 sprint_backtrace(sym, value);
60675 - else if (ext != 'f' && ext != 's')
60676 + else if (ext != 'f' && ext != 's' && ext != 'a')
60677 sprint_symbol(sym, value);
60678 else
60679 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60680 @@ -797,7 +800,11 @@ char *uuid_string(char *buf, char *end,
60681 return string(buf, end, uuid, spec);
60682 }
60683
60684 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60685 +int kptr_restrict __read_mostly = 2;
60686 +#else
60687 int kptr_restrict __read_mostly;
60688 +#endif
60689
60690 /*
60691 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60692 @@ -811,6 +818,8 @@ int kptr_restrict __read_mostly;
60693 * - 'S' For symbolic direct pointers with offset
60694 * - 's' For symbolic direct pointers without offset
60695 * - 'B' For backtraced symbolic direct pointers with offset
60696 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60697 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60698 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60699 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60700 * - 'M' For a 6-byte MAC address, it prints the address in the
60701 @@ -855,12 +864,12 @@ char *pointer(const char *fmt, char *buf
60702 {
60703 if (!ptr && *fmt != 'K') {
60704 /*
60705 - * Print (null) with the same width as a pointer so it makes
60706 + * Print (nil) with the same width as a pointer so it makes
60707 * tabular output look nice.
60708 */
60709 if (spec.field_width == -1)
60710 spec.field_width = 2 * sizeof(void *);
60711 - return string(buf, end, "(null)", spec);
60712 + return string(buf, end, "(nil)", spec);
60713 }
60714
60715 switch (*fmt) {
60716 @@ -870,6 +879,13 @@ char *pointer(const char *fmt, char *buf
60717 /* Fallthrough */
60718 case 'S':
60719 case 's':
60720 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60721 + break;
60722 +#else
60723 + return symbol_string(buf, end, ptr, spec, *fmt);
60724 +#endif
60725 + case 'A':
60726 + case 'a':
60727 case 'B':
60728 return symbol_string(buf, end, ptr, spec, *fmt);
60729 case 'R':
60730 @@ -1632,11 +1648,11 @@ int bstr_printf(char *buf, size_t size,
60731 typeof(type) value; \
60732 if (sizeof(type) == 8) { \
60733 args = PTR_ALIGN(args, sizeof(u32)); \
60734 - *(u32 *)&value = *(u32 *)args; \
60735 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60736 + *(u32 *)&value = *(const u32 *)args; \
60737 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60738 } else { \
60739 args = PTR_ALIGN(args, sizeof(type)); \
60740 - value = *(typeof(type) *)args; \
60741 + value = *(const typeof(type) *)args; \
60742 } \
60743 args += sizeof(type); \
60744 value; \
60745 @@ -1699,7 +1715,7 @@ int bstr_printf(char *buf, size_t size,
60746 case FORMAT_TYPE_STR: {
60747 const char *str_arg = args;
60748 args += strlen(str_arg) + 1;
60749 - str = string(str, end, (char *)str_arg, spec);
60750 + str = string(str, end, str_arg, spec);
60751 break;
60752 }
60753
60754 diff -urNp linux-2.6.39.4/localversion-grsec linux-2.6.39.4/localversion-grsec
60755 --- linux-2.6.39.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60756 +++ linux-2.6.39.4/localversion-grsec 2011-08-05 19:44:37.000000000 -0400
60757 @@ -0,0 +1 @@
60758 +-grsec
60759 diff -urNp linux-2.6.39.4/Makefile linux-2.6.39.4/Makefile
60760 --- linux-2.6.39.4/Makefile 2011-08-05 21:11:51.000000000 -0400
60761 +++ linux-2.6.39.4/Makefile 2011-08-07 14:17:20.000000000 -0400
60762 @@ -237,8 +237,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60763
60764 HOSTCC = gcc
60765 HOSTCXX = g++
60766 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60767 -HOSTCXXFLAGS = -O2
60768 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60769 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60770 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60771
60772 # Decide whether to build built-in, modular, or both.
60773 # Normally, just do built-in.
60774 @@ -356,10 +357,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60775 KBUILD_CPPFLAGS := -D__KERNEL__
60776
60777 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60778 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60779 -fno-strict-aliasing -fno-common \
60780 -Werror-implicit-function-declaration \
60781 -Wno-format-security \
60782 -fno-delete-null-pointer-checks
60783 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60784 KBUILD_AFLAGS_KERNEL :=
60785 KBUILD_CFLAGS_KERNEL :=
60786 KBUILD_AFLAGS := -D__ASSEMBLY__
60787 @@ -397,8 +400,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
60788 # Rules shared between *config targets and build targets
60789
60790 # Basic helpers built in scripts/
60791 -PHONY += scripts_basic
60792 -scripts_basic:
60793 +PHONY += scripts_basic gcc-plugins
60794 +scripts_basic: gcc-plugins
60795 $(Q)$(MAKE) $(build)=scripts/basic
60796 $(Q)rm -f .tmp_quiet_recordmcount
60797
60798 @@ -548,6 +551,25 @@ else
60799 KBUILD_CFLAGS += -O2
60800 endif
60801
60802 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60803 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60804 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60805 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60806 +endif
60807 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60808 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60809 +gcc-plugins:
60810 + $(Q)$(MAKE) $(build)=tools/gcc
60811 +else
60812 +gcc-plugins:
60813 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60814 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60815 +else
60816 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60817 +endif
60818 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60819 +endif
60820 +
60821 include $(srctree)/arch/$(SRCARCH)/Makefile
60822
60823 ifneq ($(CONFIG_FRAME_WARN),0)
60824 @@ -685,7 +707,7 @@ export mod_strip_cmd
60825
60826
60827 ifeq ($(KBUILD_EXTMOD),)
60828 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60829 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60830
60831 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60832 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60833 @@ -947,7 +969,7 @@ ifneq ($(KBUILD_SRC),)
60834 endif
60835
60836 # prepare2 creates a makefile if using a separate output directory
60837 -prepare2: prepare3 outputmakefile
60838 +prepare2: prepare3 outputmakefile gcc-plugins
60839
60840 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60841 include/config/auto.conf
60842 @@ -1375,7 +1397,7 @@ clean: $(clean-dirs)
60843 $(call cmd,rmdirs)
60844 $(call cmd,rmfiles)
60845 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60846 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60847 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60848 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60849 -o -name '*.symtypes' -o -name 'modules.order' \
60850 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60851 diff -urNp linux-2.6.39.4/mm/filemap.c linux-2.6.39.4/mm/filemap.c
60852 --- linux-2.6.39.4/mm/filemap.c 2011-05-19 00:06:34.000000000 -0400
60853 +++ linux-2.6.39.4/mm/filemap.c 2011-08-05 19:44:37.000000000 -0400
60854 @@ -1724,7 +1724,7 @@ int generic_file_mmap(struct file * file
60855 struct address_space *mapping = file->f_mapping;
60856
60857 if (!mapping->a_ops->readpage)
60858 - return -ENOEXEC;
60859 + return -ENODEV;
60860 file_accessed(file);
60861 vma->vm_ops = &generic_file_vm_ops;
60862 vma->vm_flags |= VM_CAN_NONLINEAR;
60863 @@ -2120,6 +2120,7 @@ inline int generic_write_checks(struct f
60864 *pos = i_size_read(inode);
60865
60866 if (limit != RLIM_INFINITY) {
60867 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60868 if (*pos >= limit) {
60869 send_sig(SIGXFSZ, current, 0);
60870 return -EFBIG;
60871 diff -urNp linux-2.6.39.4/mm/fremap.c linux-2.6.39.4/mm/fremap.c
60872 --- linux-2.6.39.4/mm/fremap.c 2011-05-19 00:06:34.000000000 -0400
60873 +++ linux-2.6.39.4/mm/fremap.c 2011-08-05 19:44:37.000000000 -0400
60874 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60875 retry:
60876 vma = find_vma(mm, start);
60877
60878 +#ifdef CONFIG_PAX_SEGMEXEC
60879 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60880 + goto out;
60881 +#endif
60882 +
60883 /*
60884 * Make sure the vma is shared, that it supports prefaulting,
60885 * and that the remapped range is valid and fully within
60886 @@ -224,7 +229,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60887 /*
60888 * drop PG_Mlocked flag for over-mapped range
60889 */
60890 - unsigned int saved_flags = vma->vm_flags;
60891 + unsigned long saved_flags = vma->vm_flags;
60892 munlock_vma_pages_range(vma, start, start + size);
60893 vma->vm_flags = saved_flags;
60894 }
60895 diff -urNp linux-2.6.39.4/mm/highmem.c linux-2.6.39.4/mm/highmem.c
60896 --- linux-2.6.39.4/mm/highmem.c 2011-05-19 00:06:34.000000000 -0400
60897 +++ linux-2.6.39.4/mm/highmem.c 2011-08-05 19:44:37.000000000 -0400
60898 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60899 * So no dangers, even with speculative execution.
60900 */
60901 page = pte_page(pkmap_page_table[i]);
60902 + pax_open_kernel();
60903 pte_clear(&init_mm, (unsigned long)page_address(page),
60904 &pkmap_page_table[i]);
60905 -
60906 + pax_close_kernel();
60907 set_page_address(page, NULL);
60908 need_flush = 1;
60909 }
60910 @@ -186,9 +187,11 @@ start:
60911 }
60912 }
60913 vaddr = PKMAP_ADDR(last_pkmap_nr);
60914 +
60915 + pax_open_kernel();
60916 set_pte_at(&init_mm, vaddr,
60917 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60918 -
60919 + pax_close_kernel();
60920 pkmap_count[last_pkmap_nr] = 1;
60921 set_page_address(page, (void *)vaddr);
60922
60923 diff -urNp linux-2.6.39.4/mm/huge_memory.c linux-2.6.39.4/mm/huge_memory.c
60924 --- linux-2.6.39.4/mm/huge_memory.c 2011-05-19 00:06:34.000000000 -0400
60925 +++ linux-2.6.39.4/mm/huge_memory.c 2011-08-05 19:44:37.000000000 -0400
60926 @@ -702,7 +702,7 @@ out:
60927 * run pte_offset_map on the pmd, if an huge pmd could
60928 * materialize from under us from a different thread.
60929 */
60930 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60931 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60932 return VM_FAULT_OOM;
60933 /* if an huge pmd materialized from under us just retry later */
60934 if (unlikely(pmd_trans_huge(*pmd)))
60935 diff -urNp linux-2.6.39.4/mm/hugetlb.c linux-2.6.39.4/mm/hugetlb.c
60936 --- linux-2.6.39.4/mm/hugetlb.c 2011-07-09 09:18:51.000000000 -0400
60937 +++ linux-2.6.39.4/mm/hugetlb.c 2011-08-05 19:44:37.000000000 -0400
60938 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60939 return 1;
60940 }
60941
60942 +#ifdef CONFIG_PAX_SEGMEXEC
60943 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60944 +{
60945 + struct mm_struct *mm = vma->vm_mm;
60946 + struct vm_area_struct *vma_m;
60947 + unsigned long address_m;
60948 + pte_t *ptep_m;
60949 +
60950 + vma_m = pax_find_mirror_vma(vma);
60951 + if (!vma_m)
60952 + return;
60953 +
60954 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60955 + address_m = address + SEGMEXEC_TASK_SIZE;
60956 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60957 + get_page(page_m);
60958 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60959 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60960 +}
60961 +#endif
60962 +
60963 /*
60964 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60965 */
60966 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60967 make_huge_pte(vma, new_page, 1));
60968 page_remove_rmap(old_page);
60969 hugepage_add_new_anon_rmap(new_page, vma, address);
60970 +
60971 +#ifdef CONFIG_PAX_SEGMEXEC
60972 + pax_mirror_huge_pte(vma, address, new_page);
60973 +#endif
60974 +
60975 /* Make the old page be freed below */
60976 new_page = old_page;
60977 mmu_notifier_invalidate_range_end(mm,
60978 @@ -2591,6 +2617,10 @@ retry:
60979 && (vma->vm_flags & VM_SHARED)));
60980 set_huge_pte_at(mm, address, ptep, new_pte);
60981
60982 +#ifdef CONFIG_PAX_SEGMEXEC
60983 + pax_mirror_huge_pte(vma, address, page);
60984 +#endif
60985 +
60986 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60987 /* Optimization, do the COW without a second fault */
60988 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60989 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60990 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60991 struct hstate *h = hstate_vma(vma);
60992
60993 +#ifdef CONFIG_PAX_SEGMEXEC
60994 + struct vm_area_struct *vma_m;
60995 +#endif
60996 +
60997 ptep = huge_pte_offset(mm, address);
60998 if (ptep) {
60999 entry = huge_ptep_get(ptep);
61000 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
61001 VM_FAULT_SET_HINDEX(h - hstates);
61002 }
61003
61004 +#ifdef CONFIG_PAX_SEGMEXEC
61005 + vma_m = pax_find_mirror_vma(vma);
61006 + if (vma_m) {
61007 + unsigned long address_m;
61008 +
61009 + if (vma->vm_start > vma_m->vm_start) {
61010 + address_m = address;
61011 + address -= SEGMEXEC_TASK_SIZE;
61012 + vma = vma_m;
61013 + h = hstate_vma(vma);
61014 + } else
61015 + address_m = address + SEGMEXEC_TASK_SIZE;
61016 +
61017 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
61018 + return VM_FAULT_OOM;
61019 + address_m &= HPAGE_MASK;
61020 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
61021 + }
61022 +#endif
61023 +
61024 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
61025 if (!ptep)
61026 return VM_FAULT_OOM;
61027 diff -urNp linux-2.6.39.4/mm/internal.h linux-2.6.39.4/mm/internal.h
61028 --- linux-2.6.39.4/mm/internal.h 2011-05-19 00:06:34.000000000 -0400
61029 +++ linux-2.6.39.4/mm/internal.h 2011-08-05 19:44:37.000000000 -0400
61030 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
61031 * in mm/page_alloc.c
61032 */
61033 extern void __free_pages_bootmem(struct page *page, unsigned int order);
61034 +extern void free_compound_page(struct page *page);
61035 extern void prep_compound_page(struct page *page, unsigned long order);
61036 #ifdef CONFIG_MEMORY_FAILURE
61037 extern bool is_free_buddy_page(struct page *page);
61038 diff -urNp linux-2.6.39.4/mm/Kconfig linux-2.6.39.4/mm/Kconfig
61039 --- linux-2.6.39.4/mm/Kconfig 2011-05-19 00:06:34.000000000 -0400
61040 +++ linux-2.6.39.4/mm/Kconfig 2011-08-05 19:44:37.000000000 -0400
61041 @@ -240,7 +240,7 @@ config KSM
61042 config DEFAULT_MMAP_MIN_ADDR
61043 int "Low address space to protect from user allocation"
61044 depends on MMU
61045 - default 4096
61046 + default 65536
61047 help
61048 This is the portion of low virtual memory which should be protected
61049 from userspace allocation. Keeping a user from writing to low pages
61050 diff -urNp linux-2.6.39.4/mm/kmemleak.c linux-2.6.39.4/mm/kmemleak.c
61051 --- linux-2.6.39.4/mm/kmemleak.c 2011-06-03 00:04:14.000000000 -0400
61052 +++ linux-2.6.39.4/mm/kmemleak.c 2011-08-05 19:44:37.000000000 -0400
61053 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
61054
61055 for (i = 0; i < object->trace_len; i++) {
61056 void *ptr = (void *)object->trace[i];
61057 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
61058 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
61059 }
61060 }
61061
61062 diff -urNp linux-2.6.39.4/mm/maccess.c linux-2.6.39.4/mm/maccess.c
61063 --- linux-2.6.39.4/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
61064 +++ linux-2.6.39.4/mm/maccess.c 2011-08-05 19:44:37.000000000 -0400
61065 @@ -15,10 +15,10 @@
61066 * happens, handle that and return -EFAULT.
61067 */
61068
61069 -long __weak probe_kernel_read(void *dst, void *src, size_t size)
61070 +long __weak probe_kernel_read(void *dst, const void *src, size_t size)
61071 __attribute__((alias("__probe_kernel_read")));
61072
61073 -long __probe_kernel_read(void *dst, void *src, size_t size)
61074 +long __probe_kernel_read(void *dst, const void *src, size_t size)
61075 {
61076 long ret;
61077 mm_segment_t old_fs = get_fs();
61078 @@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
61079 * Safely write to address @dst from the buffer at @src. If a kernel fault
61080 * happens, handle that and return -EFAULT.
61081 */
61082 -long __weak probe_kernel_write(void *dst, void *src, size_t size)
61083 +long __weak probe_kernel_write(void *dst, const void *src, size_t size)
61084 __attribute__((alias("__probe_kernel_write")));
61085
61086 -long __probe_kernel_write(void *dst, void *src, size_t size)
61087 +long __probe_kernel_write(void *dst, const void *src, size_t size)
61088 {
61089 long ret;
61090 mm_segment_t old_fs = get_fs();
61091 diff -urNp linux-2.6.39.4/mm/madvise.c linux-2.6.39.4/mm/madvise.c
61092 --- linux-2.6.39.4/mm/madvise.c 2011-05-19 00:06:34.000000000 -0400
61093 +++ linux-2.6.39.4/mm/madvise.c 2011-08-05 19:44:37.000000000 -0400
61094 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
61095 pgoff_t pgoff;
61096 unsigned long new_flags = vma->vm_flags;
61097
61098 +#ifdef CONFIG_PAX_SEGMEXEC
61099 + struct vm_area_struct *vma_m;
61100 +#endif
61101 +
61102 switch (behavior) {
61103 case MADV_NORMAL:
61104 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
61105 @@ -110,6 +114,13 @@ success:
61106 /*
61107 * vm_flags is protected by the mmap_sem held in write mode.
61108 */
61109 +
61110 +#ifdef CONFIG_PAX_SEGMEXEC
61111 + vma_m = pax_find_mirror_vma(vma);
61112 + if (vma_m)
61113 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
61114 +#endif
61115 +
61116 vma->vm_flags = new_flags;
61117
61118 out:
61119 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
61120 struct vm_area_struct ** prev,
61121 unsigned long start, unsigned long end)
61122 {
61123 +
61124 +#ifdef CONFIG_PAX_SEGMEXEC
61125 + struct vm_area_struct *vma_m;
61126 +#endif
61127 +
61128 *prev = vma;
61129 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
61130 return -EINVAL;
61131 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
61132 zap_page_range(vma, start, end - start, &details);
61133 } else
61134 zap_page_range(vma, start, end - start, NULL);
61135 +
61136 +#ifdef CONFIG_PAX_SEGMEXEC
61137 + vma_m = pax_find_mirror_vma(vma);
61138 + if (vma_m) {
61139 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
61140 + struct zap_details details = {
61141 + .nonlinear_vma = vma_m,
61142 + .last_index = ULONG_MAX,
61143 + };
61144 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
61145 + } else
61146 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
61147 + }
61148 +#endif
61149 +
61150 return 0;
61151 }
61152
61153 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
61154 if (end < start)
61155 goto out;
61156
61157 +#ifdef CONFIG_PAX_SEGMEXEC
61158 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
61159 + if (end > SEGMEXEC_TASK_SIZE)
61160 + goto out;
61161 + } else
61162 +#endif
61163 +
61164 + if (end > TASK_SIZE)
61165 + goto out;
61166 +
61167 error = 0;
61168 if (end == start)
61169 goto out;
61170 diff -urNp linux-2.6.39.4/mm/memory.c linux-2.6.39.4/mm/memory.c
61171 --- linux-2.6.39.4/mm/memory.c 2011-05-19 00:06:34.000000000 -0400
61172 +++ linux-2.6.39.4/mm/memory.c 2011-08-05 19:44:37.000000000 -0400
61173 @@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
61174 return;
61175
61176 pmd = pmd_offset(pud, start);
61177 +
61178 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
61179 pud_clear(pud);
61180 pmd_free_tlb(tlb, pmd, start);
61181 +#endif
61182 +
61183 }
61184
61185 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
61186 @@ -291,9 +295,12 @@ static inline void free_pud_range(struct
61187 if (end - 1 > ceiling - 1)
61188 return;
61189
61190 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
61191 pud = pud_offset(pgd, start);
61192 pgd_clear(pgd);
61193 pud_free_tlb(tlb, pud, start);
61194 +#endif
61195 +
61196 }
61197
61198 /*
61199 @@ -1410,12 +1417,6 @@ no_page_table:
61200 return page;
61201 }
61202
61203 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
61204 -{
61205 - return stack_guard_page_start(vma, addr) ||
61206 - stack_guard_page_end(vma, addr+PAGE_SIZE);
61207 -}
61208 -
61209 /**
61210 * __get_user_pages() - pin user pages in memory
61211 * @tsk: task_struct of target task
61212 @@ -1488,10 +1489,10 @@ int __get_user_pages(struct task_struct
61213 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
61214 i = 0;
61215
61216 - do {
61217 + while (nr_pages) {
61218 struct vm_area_struct *vma;
61219
61220 - vma = find_extend_vma(mm, start);
61221 + vma = find_vma(mm, start);
61222 if (!vma && in_gate_area(mm, start)) {
61223 unsigned long pg = start & PAGE_MASK;
61224 pgd_t *pgd;
61225 @@ -1539,7 +1540,7 @@ int __get_user_pages(struct task_struct
61226 goto next_page;
61227 }
61228
61229 - if (!vma ||
61230 + if (!vma || start < vma->vm_start ||
61231 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
61232 !(vm_flags & vma->vm_flags))
61233 return i ? : -EFAULT;
61234 @@ -1566,11 +1567,6 @@ int __get_user_pages(struct task_struct
61235 int ret;
61236 unsigned int fault_flags = 0;
61237
61238 - /* For mlock, just skip the stack guard page. */
61239 - if (foll_flags & FOLL_MLOCK) {
61240 - if (stack_guard_page(vma, start))
61241 - goto next_page;
61242 - }
61243 if (foll_flags & FOLL_WRITE)
61244 fault_flags |= FAULT_FLAG_WRITE;
61245 if (nonblocking)
61246 @@ -1644,7 +1640,7 @@ next_page:
61247 start += PAGE_SIZE;
61248 nr_pages--;
61249 } while (nr_pages && start < vma->vm_end);
61250 - } while (nr_pages);
61251 + }
61252 return i;
61253 }
61254 EXPORT_SYMBOL(__get_user_pages);
61255 @@ -1795,6 +1791,10 @@ static int insert_page(struct vm_area_st
61256 page_add_file_rmap(page);
61257 set_pte_at(mm, addr, pte, mk_pte(page, prot));
61258
61259 +#ifdef CONFIG_PAX_SEGMEXEC
61260 + pax_mirror_file_pte(vma, addr, page, ptl);
61261 +#endif
61262 +
61263 retval = 0;
61264 pte_unmap_unlock(pte, ptl);
61265 return retval;
61266 @@ -1829,10 +1829,22 @@ out:
61267 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
61268 struct page *page)
61269 {
61270 +
61271 +#ifdef CONFIG_PAX_SEGMEXEC
61272 + struct vm_area_struct *vma_m;
61273 +#endif
61274 +
61275 if (addr < vma->vm_start || addr >= vma->vm_end)
61276 return -EFAULT;
61277 if (!page_count(page))
61278 return -EINVAL;
61279 +
61280 +#ifdef CONFIG_PAX_SEGMEXEC
61281 + vma_m = pax_find_mirror_vma(vma);
61282 + if (vma_m)
61283 + vma_m->vm_flags |= VM_INSERTPAGE;
61284 +#endif
61285 +
61286 vma->vm_flags |= VM_INSERTPAGE;
61287 return insert_page(vma, addr, page, vma->vm_page_prot);
61288 }
61289 @@ -1918,6 +1930,7 @@ int vm_insert_mixed(struct vm_area_struc
61290 unsigned long pfn)
61291 {
61292 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
61293 + BUG_ON(vma->vm_mirror);
61294
61295 if (addr < vma->vm_start || addr >= vma->vm_end)
61296 return -EFAULT;
61297 @@ -2233,6 +2246,186 @@ static inline void cow_user_page(struct
61298 copy_user_highpage(dst, src, va, vma);
61299 }
61300
61301 +#ifdef CONFIG_PAX_SEGMEXEC
61302 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
61303 +{
61304 + struct mm_struct *mm = vma->vm_mm;
61305 + spinlock_t *ptl;
61306 + pte_t *pte, entry;
61307 +
61308 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
61309 + entry = *pte;
61310 + if (!pte_present(entry)) {
61311 + if (!pte_none(entry)) {
61312 + BUG_ON(pte_file(entry));
61313 + free_swap_and_cache(pte_to_swp_entry(entry));
61314 + pte_clear_not_present_full(mm, address, pte, 0);
61315 + }
61316 + } else {
61317 + struct page *page;
61318 +
61319 + flush_cache_page(vma, address, pte_pfn(entry));
61320 + entry = ptep_clear_flush(vma, address, pte);
61321 + BUG_ON(pte_dirty(entry));
61322 + page = vm_normal_page(vma, address, entry);
61323 + if (page) {
61324 + update_hiwater_rss(mm);
61325 + if (PageAnon(page))
61326 + dec_mm_counter_fast(mm, MM_ANONPAGES);
61327 + else
61328 + dec_mm_counter_fast(mm, MM_FILEPAGES);
61329 + page_remove_rmap(page);
61330 + page_cache_release(page);
61331 + }
61332 + }
61333 + pte_unmap_unlock(pte, ptl);
61334 +}
61335 +
61336 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
61337 + *
61338 + * the ptl of the lower mapped page is held on entry and is not released on exit
61339 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61340 + */
61341 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61342 +{
61343 + struct mm_struct *mm = vma->vm_mm;
61344 + unsigned long address_m;
61345 + spinlock_t *ptl_m;
61346 + struct vm_area_struct *vma_m;
61347 + pmd_t *pmd_m;
61348 + pte_t *pte_m, entry_m;
61349 +
61350 + BUG_ON(!page_m || !PageAnon(page_m));
61351 +
61352 + vma_m = pax_find_mirror_vma(vma);
61353 + if (!vma_m)
61354 + return;
61355 +
61356 + BUG_ON(!PageLocked(page_m));
61357 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61358 + address_m = address + SEGMEXEC_TASK_SIZE;
61359 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61360 + pte_m = pte_offset_map(pmd_m, address_m);
61361 + ptl_m = pte_lockptr(mm, pmd_m);
61362 + if (ptl != ptl_m) {
61363 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61364 + if (!pte_none(*pte_m))
61365 + goto out;
61366 + }
61367 +
61368 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61369 + page_cache_get(page_m);
61370 + page_add_anon_rmap(page_m, vma_m, address_m);
61371 + inc_mm_counter_fast(mm, MM_ANONPAGES);
61372 + set_pte_at(mm, address_m, pte_m, entry_m);
61373 + update_mmu_cache(vma_m, address_m, entry_m);
61374 +out:
61375 + if (ptl != ptl_m)
61376 + spin_unlock(ptl_m);
61377 + pte_unmap(pte_m);
61378 + unlock_page(page_m);
61379 +}
61380 +
61381 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61382 +{
61383 + struct mm_struct *mm = vma->vm_mm;
61384 + unsigned long address_m;
61385 + spinlock_t *ptl_m;
61386 + struct vm_area_struct *vma_m;
61387 + pmd_t *pmd_m;
61388 + pte_t *pte_m, entry_m;
61389 +
61390 + BUG_ON(!page_m || PageAnon(page_m));
61391 +
61392 + vma_m = pax_find_mirror_vma(vma);
61393 + if (!vma_m)
61394 + return;
61395 +
61396 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61397 + address_m = address + SEGMEXEC_TASK_SIZE;
61398 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61399 + pte_m = pte_offset_map(pmd_m, address_m);
61400 + ptl_m = pte_lockptr(mm, pmd_m);
61401 + if (ptl != ptl_m) {
61402 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61403 + if (!pte_none(*pte_m))
61404 + goto out;
61405 + }
61406 +
61407 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61408 + page_cache_get(page_m);
61409 + page_add_file_rmap(page_m);
61410 + inc_mm_counter_fast(mm, MM_FILEPAGES);
61411 + set_pte_at(mm, address_m, pte_m, entry_m);
61412 + update_mmu_cache(vma_m, address_m, entry_m);
61413 +out:
61414 + if (ptl != ptl_m)
61415 + spin_unlock(ptl_m);
61416 + pte_unmap(pte_m);
61417 +}
61418 +
61419 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61420 +{
61421 + struct mm_struct *mm = vma->vm_mm;
61422 + unsigned long address_m;
61423 + spinlock_t *ptl_m;
61424 + struct vm_area_struct *vma_m;
61425 + pmd_t *pmd_m;
61426 + pte_t *pte_m, entry_m;
61427 +
61428 + vma_m = pax_find_mirror_vma(vma);
61429 + if (!vma_m)
61430 + return;
61431 +
61432 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61433 + address_m = address + SEGMEXEC_TASK_SIZE;
61434 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61435 + pte_m = pte_offset_map(pmd_m, address_m);
61436 + ptl_m = pte_lockptr(mm, pmd_m);
61437 + if (ptl != ptl_m) {
61438 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61439 + if (!pte_none(*pte_m))
61440 + goto out;
61441 + }
61442 +
61443 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61444 + set_pte_at(mm, address_m, pte_m, entry_m);
61445 +out:
61446 + if (ptl != ptl_m)
61447 + spin_unlock(ptl_m);
61448 + pte_unmap(pte_m);
61449 +}
61450 +
61451 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61452 +{
61453 + struct page *page_m;
61454 + pte_t entry;
61455 +
61456 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61457 + goto out;
61458 +
61459 + entry = *pte;
61460 + page_m = vm_normal_page(vma, address, entry);
61461 + if (!page_m)
61462 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61463 + else if (PageAnon(page_m)) {
61464 + if (pax_find_mirror_vma(vma)) {
61465 + pte_unmap_unlock(pte, ptl);
61466 + lock_page(page_m);
61467 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61468 + if (pte_same(entry, *pte))
61469 + pax_mirror_anon_pte(vma, address, page_m, ptl);
61470 + else
61471 + unlock_page(page_m);
61472 + }
61473 + } else
61474 + pax_mirror_file_pte(vma, address, page_m, ptl);
61475 +
61476 +out:
61477 + pte_unmap_unlock(pte, ptl);
61478 +}
61479 +#endif
61480 +
61481 /*
61482 * This routine handles present pages, when users try to write
61483 * to a shared page. It is done by copying the page to a new address
61484 @@ -2444,6 +2637,12 @@ gotten:
61485 */
61486 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61487 if (likely(pte_same(*page_table, orig_pte))) {
61488 +
61489 +#ifdef CONFIG_PAX_SEGMEXEC
61490 + if (pax_find_mirror_vma(vma))
61491 + BUG_ON(!trylock_page(new_page));
61492 +#endif
61493 +
61494 if (old_page) {
61495 if (!PageAnon(old_page)) {
61496 dec_mm_counter_fast(mm, MM_FILEPAGES);
61497 @@ -2495,6 +2694,10 @@ gotten:
61498 page_remove_rmap(old_page);
61499 }
61500
61501 +#ifdef CONFIG_PAX_SEGMEXEC
61502 + pax_mirror_anon_pte(vma, address, new_page, ptl);
61503 +#endif
61504 +
61505 /* Free the old page.. */
61506 new_page = old_page;
61507 ret |= VM_FAULT_WRITE;
61508 @@ -2905,6 +3108,11 @@ static int do_swap_page(struct mm_struct
61509 swap_free(entry);
61510 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61511 try_to_free_swap(page);
61512 +
61513 +#ifdef CONFIG_PAX_SEGMEXEC
61514 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61515 +#endif
61516 +
61517 unlock_page(page);
61518 if (swapcache) {
61519 /*
61520 @@ -2928,6 +3136,11 @@ static int do_swap_page(struct mm_struct
61521
61522 /* No need to invalidate - it was non-present before */
61523 update_mmu_cache(vma, address, page_table);
61524 +
61525 +#ifdef CONFIG_PAX_SEGMEXEC
61526 + pax_mirror_anon_pte(vma, address, page, ptl);
61527 +#endif
61528 +
61529 unlock:
61530 pte_unmap_unlock(page_table, ptl);
61531 out:
61532 @@ -2947,40 +3160,6 @@ out_release:
61533 }
61534
61535 /*
61536 - * This is like a special single-page "expand_{down|up}wards()",
61537 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
61538 - * doesn't hit another vma.
61539 - */
61540 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61541 -{
61542 - address &= PAGE_MASK;
61543 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61544 - struct vm_area_struct *prev = vma->vm_prev;
61545 -
61546 - /*
61547 - * Is there a mapping abutting this one below?
61548 - *
61549 - * That's only ok if it's the same stack mapping
61550 - * that has gotten split..
61551 - */
61552 - if (prev && prev->vm_end == address)
61553 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61554 -
61555 - expand_stack(vma, address - PAGE_SIZE);
61556 - }
61557 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61558 - struct vm_area_struct *next = vma->vm_next;
61559 -
61560 - /* As VM_GROWSDOWN but s/below/above/ */
61561 - if (next && next->vm_start == address + PAGE_SIZE)
61562 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61563 -
61564 - expand_upwards(vma, address + PAGE_SIZE);
61565 - }
61566 - return 0;
61567 -}
61568 -
61569 -/*
61570 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61571 * but allow concurrent faults), and pte mapped but not yet locked.
61572 * We return with mmap_sem still held, but pte unmapped and unlocked.
61573 @@ -2989,27 +3168,23 @@ static int do_anonymous_page(struct mm_s
61574 unsigned long address, pte_t *page_table, pmd_t *pmd,
61575 unsigned int flags)
61576 {
61577 - struct page *page;
61578 + struct page *page = NULL;
61579 spinlock_t *ptl;
61580 pte_t entry;
61581
61582 - pte_unmap(page_table);
61583 -
61584 - /* Check if we need to add a guard page to the stack */
61585 - if (check_stack_guard_page(vma, address) < 0)
61586 - return VM_FAULT_SIGBUS;
61587 -
61588 - /* Use the zero-page for reads */
61589 if (!(flags & FAULT_FLAG_WRITE)) {
61590 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61591 vma->vm_page_prot));
61592 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61593 + ptl = pte_lockptr(mm, pmd);
61594 + spin_lock(ptl);
61595 if (!pte_none(*page_table))
61596 goto unlock;
61597 goto setpte;
61598 }
61599
61600 /* Allocate our own private page. */
61601 + pte_unmap(page_table);
61602 +
61603 if (unlikely(anon_vma_prepare(vma)))
61604 goto oom;
61605 page = alloc_zeroed_user_highpage_movable(vma, address);
61606 @@ -3028,6 +3203,11 @@ static int do_anonymous_page(struct mm_s
61607 if (!pte_none(*page_table))
61608 goto release;
61609
61610 +#ifdef CONFIG_PAX_SEGMEXEC
61611 + if (pax_find_mirror_vma(vma))
61612 + BUG_ON(!trylock_page(page));
61613 +#endif
61614 +
61615 inc_mm_counter_fast(mm, MM_ANONPAGES);
61616 page_add_new_anon_rmap(page, vma, address);
61617 setpte:
61618 @@ -3035,6 +3215,12 @@ setpte:
61619
61620 /* No need to invalidate - it was non-present before */
61621 update_mmu_cache(vma, address, page_table);
61622 +
61623 +#ifdef CONFIG_PAX_SEGMEXEC
61624 + if (page)
61625 + pax_mirror_anon_pte(vma, address, page, ptl);
61626 +#endif
61627 +
61628 unlock:
61629 pte_unmap_unlock(page_table, ptl);
61630 return 0;
61631 @@ -3172,6 +3358,12 @@ static int __do_fault(struct mm_struct *
61632 */
61633 /* Only go through if we didn't race with anybody else... */
61634 if (likely(pte_same(*page_table, orig_pte))) {
61635 +
61636 +#ifdef CONFIG_PAX_SEGMEXEC
61637 + if (anon && pax_find_mirror_vma(vma))
61638 + BUG_ON(!trylock_page(page));
61639 +#endif
61640 +
61641 flush_icache_page(vma, page);
61642 entry = mk_pte(page, vma->vm_page_prot);
61643 if (flags & FAULT_FLAG_WRITE)
61644 @@ -3191,6 +3383,14 @@ static int __do_fault(struct mm_struct *
61645
61646 /* no need to invalidate: a not-present page won't be cached */
61647 update_mmu_cache(vma, address, page_table);
61648 +
61649 +#ifdef CONFIG_PAX_SEGMEXEC
61650 + if (anon)
61651 + pax_mirror_anon_pte(vma, address, page, ptl);
61652 + else
61653 + pax_mirror_file_pte(vma, address, page, ptl);
61654 +#endif
61655 +
61656 } else {
61657 if (charged)
61658 mem_cgroup_uncharge_page(page);
61659 @@ -3338,6 +3538,12 @@ int handle_pte_fault(struct mm_struct *m
61660 if (flags & FAULT_FLAG_WRITE)
61661 flush_tlb_fix_spurious_fault(vma, address);
61662 }
61663 +
61664 +#ifdef CONFIG_PAX_SEGMEXEC
61665 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61666 + return 0;
61667 +#endif
61668 +
61669 unlock:
61670 pte_unmap_unlock(pte, ptl);
61671 return 0;
61672 @@ -3354,6 +3560,10 @@ int handle_mm_fault(struct mm_struct *mm
61673 pmd_t *pmd;
61674 pte_t *pte;
61675
61676 +#ifdef CONFIG_PAX_SEGMEXEC
61677 + struct vm_area_struct *vma_m;
61678 +#endif
61679 +
61680 __set_current_state(TASK_RUNNING);
61681
61682 count_vm_event(PGFAULT);
61683 @@ -3364,6 +3574,34 @@ int handle_mm_fault(struct mm_struct *mm
61684 if (unlikely(is_vm_hugetlb_page(vma)))
61685 return hugetlb_fault(mm, vma, address, flags);
61686
61687 +#ifdef CONFIG_PAX_SEGMEXEC
61688 + vma_m = pax_find_mirror_vma(vma);
61689 + if (vma_m) {
61690 + unsigned long address_m;
61691 + pgd_t *pgd_m;
61692 + pud_t *pud_m;
61693 + pmd_t *pmd_m;
61694 +
61695 + if (vma->vm_start > vma_m->vm_start) {
61696 + address_m = address;
61697 + address -= SEGMEXEC_TASK_SIZE;
61698 + vma = vma_m;
61699 + } else
61700 + address_m = address + SEGMEXEC_TASK_SIZE;
61701 +
61702 + pgd_m = pgd_offset(mm, address_m);
61703 + pud_m = pud_alloc(mm, pgd_m, address_m);
61704 + if (!pud_m)
61705 + return VM_FAULT_OOM;
61706 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61707 + if (!pmd_m)
61708 + return VM_FAULT_OOM;
61709 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61710 + return VM_FAULT_OOM;
61711 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61712 + }
61713 +#endif
61714 +
61715 pgd = pgd_offset(mm, address);
61716 pud = pud_alloc(mm, pgd, address);
61717 if (!pud)
61718 @@ -3393,7 +3631,7 @@ int handle_mm_fault(struct mm_struct *mm
61719 * run pte_offset_map on the pmd, if an huge pmd could
61720 * materialize from under us from a different thread.
61721 */
61722 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61723 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61724 return VM_FAULT_OOM;
61725 /* if an huge pmd materialized from under us just retry later */
61726 if (unlikely(pmd_trans_huge(*pmd)))
61727 @@ -3497,7 +3735,7 @@ static int __init gate_vma_init(void)
61728 gate_vma.vm_start = FIXADDR_USER_START;
61729 gate_vma.vm_end = FIXADDR_USER_END;
61730 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61731 - gate_vma.vm_page_prot = __P101;
61732 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61733 /*
61734 * Make sure the vDSO gets into every core dump.
61735 * Dumping its contents makes post-mortem fully interpretable later
61736 diff -urNp linux-2.6.39.4/mm/memory-failure.c linux-2.6.39.4/mm/memory-failure.c
61737 --- linux-2.6.39.4/mm/memory-failure.c 2011-07-09 09:18:51.000000000 -0400
61738 +++ linux-2.6.39.4/mm/memory-failure.c 2011-08-05 19:44:37.000000000 -0400
61739 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61740
61741 int sysctl_memory_failure_recovery __read_mostly = 1;
61742
61743 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61744 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61745
61746 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61747
61748 @@ -1013,7 +1013,7 @@ int __memory_failure(unsigned long pfn,
61749 }
61750
61751 nr_pages = 1 << compound_trans_order(hpage);
61752 - atomic_long_add(nr_pages, &mce_bad_pages);
61753 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61754
61755 /*
61756 * We need/can do nothing about count=0 pages.
61757 @@ -1043,7 +1043,7 @@ int __memory_failure(unsigned long pfn,
61758 if (!PageHWPoison(hpage)
61759 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61760 || (p != hpage && TestSetPageHWPoison(hpage))) {
61761 - atomic_long_sub(nr_pages, &mce_bad_pages);
61762 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61763 return 0;
61764 }
61765 set_page_hwpoison_huge_page(hpage);
61766 @@ -1101,7 +1101,7 @@ int __memory_failure(unsigned long pfn,
61767 }
61768 if (hwpoison_filter(p)) {
61769 if (TestClearPageHWPoison(p))
61770 - atomic_long_sub(nr_pages, &mce_bad_pages);
61771 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61772 unlock_page(hpage);
61773 put_page(hpage);
61774 return 0;
61775 @@ -1227,7 +1227,7 @@ int unpoison_memory(unsigned long pfn)
61776 return 0;
61777 }
61778 if (TestClearPageHWPoison(p))
61779 - atomic_long_sub(nr_pages, &mce_bad_pages);
61780 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61781 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61782 return 0;
61783 }
61784 @@ -1241,7 +1241,7 @@ int unpoison_memory(unsigned long pfn)
61785 */
61786 if (TestClearPageHWPoison(page)) {
61787 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61788 - atomic_long_sub(nr_pages, &mce_bad_pages);
61789 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61790 freeit = 1;
61791 if (PageHuge(page))
61792 clear_page_hwpoison_huge_page(page);
61793 @@ -1354,7 +1354,7 @@ static int soft_offline_huge_page(struct
61794 }
61795 done:
61796 if (!PageHWPoison(hpage))
61797 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61798 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61799 set_page_hwpoison_huge_page(hpage);
61800 dequeue_hwpoisoned_huge_page(hpage);
61801 /* keep elevated page count for bad page */
61802 @@ -1484,7 +1484,7 @@ int soft_offline_page(struct page *page,
61803 return ret;
61804
61805 done:
61806 - atomic_long_add(1, &mce_bad_pages);
61807 + atomic_long_add_unchecked(1, &mce_bad_pages);
61808 SetPageHWPoison(page);
61809 /* keep elevated page count for bad page */
61810 return ret;
61811 diff -urNp linux-2.6.39.4/mm/mempolicy.c linux-2.6.39.4/mm/mempolicy.c
61812 --- linux-2.6.39.4/mm/mempolicy.c 2011-05-19 00:06:34.000000000 -0400
61813 +++ linux-2.6.39.4/mm/mempolicy.c 2011-08-05 19:44:37.000000000 -0400
61814 @@ -643,6 +643,10 @@ static int mbind_range(struct mm_struct
61815 unsigned long vmstart;
61816 unsigned long vmend;
61817
61818 +#ifdef CONFIG_PAX_SEGMEXEC
61819 + struct vm_area_struct *vma_m;
61820 +#endif
61821 +
61822 vma = find_vma_prev(mm, start, &prev);
61823 if (!vma || vma->vm_start > start)
61824 return -EFAULT;
61825 @@ -673,6 +677,16 @@ static int mbind_range(struct mm_struct
61826 err = policy_vma(vma, new_pol);
61827 if (err)
61828 goto out;
61829 +
61830 +#ifdef CONFIG_PAX_SEGMEXEC
61831 + vma_m = pax_find_mirror_vma(vma);
61832 + if (vma_m) {
61833 + err = policy_vma(vma_m, new_pol);
61834 + if (err)
61835 + goto out;
61836 + }
61837 +#endif
61838 +
61839 }
61840
61841 out:
61842 @@ -1106,6 +1120,17 @@ static long do_mbind(unsigned long start
61843
61844 if (end < start)
61845 return -EINVAL;
61846 +
61847 +#ifdef CONFIG_PAX_SEGMEXEC
61848 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61849 + if (end > SEGMEXEC_TASK_SIZE)
61850 + return -EINVAL;
61851 + } else
61852 +#endif
61853 +
61854 + if (end > TASK_SIZE)
61855 + return -EINVAL;
61856 +
61857 if (end == start)
61858 return 0;
61859
61860 @@ -1324,6 +1349,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61861 if (!mm)
61862 goto out;
61863
61864 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61865 + if (mm != current->mm &&
61866 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61867 + err = -EPERM;
61868 + goto out;
61869 + }
61870 +#endif
61871 +
61872 /*
61873 * Check if this process has the right to modify the specified
61874 * process. The right exists if the process has administrative
61875 @@ -1333,8 +1366,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61876 rcu_read_lock();
61877 tcred = __task_cred(task);
61878 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61879 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61880 - !capable(CAP_SYS_NICE)) {
61881 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61882 rcu_read_unlock();
61883 err = -EPERM;
61884 goto out;
61885 @@ -2634,7 +2666,7 @@ int show_numa_map(struct seq_file *m, vo
61886
61887 if (file) {
61888 seq_printf(m, " file=");
61889 - seq_path(m, &file->f_path, "\n\t= ");
61890 + seq_path(m, &file->f_path, "\n\t\\= ");
61891 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61892 seq_printf(m, " heap");
61893 } else if (vma->vm_start <= mm->start_stack &&
61894 diff -urNp linux-2.6.39.4/mm/migrate.c linux-2.6.39.4/mm/migrate.c
61895 --- linux-2.6.39.4/mm/migrate.c 2011-07-09 09:18:51.000000000 -0400
61896 +++ linux-2.6.39.4/mm/migrate.c 2011-08-05 19:44:37.000000000 -0400
61897 @@ -1133,6 +1133,8 @@ static int do_pages_move(struct mm_struc
61898 unsigned long chunk_start;
61899 int err;
61900
61901 + pax_track_stack();
61902 +
61903 task_nodes = cpuset_mems_allowed(task);
61904
61905 err = -ENOMEM;
61906 @@ -1317,6 +1319,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61907 if (!mm)
61908 return -EINVAL;
61909
61910 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61911 + if (mm != current->mm &&
61912 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61913 + err = -EPERM;
61914 + goto out;
61915 + }
61916 +#endif
61917 +
61918 /*
61919 * Check if this process has the right to modify the specified
61920 * process. The right exists if the process has administrative
61921 @@ -1326,8 +1336,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61922 rcu_read_lock();
61923 tcred = __task_cred(task);
61924 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61925 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61926 - !capable(CAP_SYS_NICE)) {
61927 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61928 rcu_read_unlock();
61929 err = -EPERM;
61930 goto out;
61931 diff -urNp linux-2.6.39.4/mm/mlock.c linux-2.6.39.4/mm/mlock.c
61932 --- linux-2.6.39.4/mm/mlock.c 2011-05-19 00:06:34.000000000 -0400
61933 +++ linux-2.6.39.4/mm/mlock.c 2011-08-05 19:44:37.000000000 -0400
61934 @@ -13,6 +13,7 @@
61935 #include <linux/pagemap.h>
61936 #include <linux/mempolicy.h>
61937 #include <linux/syscalls.h>
61938 +#include <linux/security.h>
61939 #include <linux/sched.h>
61940 #include <linux/module.h>
61941 #include <linux/rmap.h>
61942 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61943 return -EINVAL;
61944 if (end == start)
61945 return 0;
61946 + if (end > TASK_SIZE)
61947 + return -EINVAL;
61948 +
61949 vma = find_vma_prev(current->mm, start, &prev);
61950 if (!vma || vma->vm_start > start)
61951 return -ENOMEM;
61952 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61953 for (nstart = start ; ; ) {
61954 unsigned int newflags;
61955
61956 +#ifdef CONFIG_PAX_SEGMEXEC
61957 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61958 + break;
61959 +#endif
61960 +
61961 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61962
61963 newflags = vma->vm_flags | VM_LOCKED;
61964 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61965 lock_limit >>= PAGE_SHIFT;
61966
61967 /* check against resource limits */
61968 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61969 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61970 error = do_mlock(start, len, 1);
61971 up_write(&current->mm->mmap_sem);
61972 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61973 static int do_mlockall(int flags)
61974 {
61975 struct vm_area_struct * vma, * prev = NULL;
61976 - unsigned int def_flags = 0;
61977
61978 if (flags & MCL_FUTURE)
61979 - def_flags = VM_LOCKED;
61980 - current->mm->def_flags = def_flags;
61981 + current->mm->def_flags |= VM_LOCKED;
61982 + else
61983 + current->mm->def_flags &= ~VM_LOCKED;
61984 if (flags == MCL_FUTURE)
61985 goto out;
61986
61987 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61988 - unsigned int newflags;
61989 + unsigned long newflags;
61990 +
61991 +#ifdef CONFIG_PAX_SEGMEXEC
61992 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61993 + break;
61994 +#endif
61995
61996 + BUG_ON(vma->vm_end > TASK_SIZE);
61997 newflags = vma->vm_flags | VM_LOCKED;
61998 if (!(flags & MCL_CURRENT))
61999 newflags &= ~VM_LOCKED;
62000 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
62001 lock_limit >>= PAGE_SHIFT;
62002
62003 ret = -ENOMEM;
62004 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
62005 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
62006 capable(CAP_IPC_LOCK))
62007 ret = do_mlockall(flags);
62008 diff -urNp linux-2.6.39.4/mm/mmap.c linux-2.6.39.4/mm/mmap.c
62009 --- linux-2.6.39.4/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
62010 +++ linux-2.6.39.4/mm/mmap.c 2011-08-05 20:34:06.000000000 -0400
62011 @@ -46,6 +46,16 @@
62012 #define arch_rebalance_pgtables(addr, len) (addr)
62013 #endif
62014
62015 +static inline void verify_mm_writelocked(struct mm_struct *mm)
62016 +{
62017 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
62018 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62019 + up_read(&mm->mmap_sem);
62020 + BUG();
62021 + }
62022 +#endif
62023 +}
62024 +
62025 static void unmap_region(struct mm_struct *mm,
62026 struct vm_area_struct *vma, struct vm_area_struct *prev,
62027 unsigned long start, unsigned long end);
62028 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
62029 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
62030 *
62031 */
62032 -pgprot_t protection_map[16] = {
62033 +pgprot_t protection_map[16] __read_only = {
62034 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
62035 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
62036 };
62037
62038 pgprot_t vm_get_page_prot(unsigned long vm_flags)
62039 {
62040 - return __pgprot(pgprot_val(protection_map[vm_flags &
62041 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
62042 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
62043 pgprot_val(arch_vm_get_page_prot(vm_flags)));
62044 +
62045 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62046 + if (!(__supported_pte_mask & _PAGE_NX) &&
62047 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
62048 + (vm_flags & (VM_READ | VM_WRITE)))
62049 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
62050 +#endif
62051 +
62052 + return prot;
62053 }
62054 EXPORT_SYMBOL(vm_get_page_prot);
62055
62056 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
62057 int sysctl_overcommit_ratio = 50; /* default is 50% */
62058 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
62059 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
62060 struct percpu_counter vm_committed_as;
62061
62062 /*
62063 @@ -232,6 +252,7 @@ static struct vm_area_struct *remove_vma
62064 struct vm_area_struct *next = vma->vm_next;
62065
62066 might_sleep();
62067 + BUG_ON(vma->vm_mirror);
62068 if (vma->vm_ops && vma->vm_ops->close)
62069 vma->vm_ops->close(vma);
62070 if (vma->vm_file) {
62071 @@ -276,6 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
62072 * not page aligned -Ram Gupta
62073 */
62074 rlim = rlimit(RLIMIT_DATA);
62075 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
62076 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
62077 (mm->end_data - mm->start_data) > rlim)
62078 goto out;
62079 @@ -719,6 +741,12 @@ static int
62080 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
62081 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62082 {
62083 +
62084 +#ifdef CONFIG_PAX_SEGMEXEC
62085 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
62086 + return 0;
62087 +#endif
62088 +
62089 if (is_mergeable_vma(vma, file, vm_flags) &&
62090 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62091 if (vma->vm_pgoff == vm_pgoff)
62092 @@ -738,6 +766,12 @@ static int
62093 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
62094 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62095 {
62096 +
62097 +#ifdef CONFIG_PAX_SEGMEXEC
62098 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
62099 + return 0;
62100 +#endif
62101 +
62102 if (is_mergeable_vma(vma, file, vm_flags) &&
62103 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62104 pgoff_t vm_pglen;
62105 @@ -780,13 +814,20 @@ can_vma_merge_after(struct vm_area_struc
62106 struct vm_area_struct *vma_merge(struct mm_struct *mm,
62107 struct vm_area_struct *prev, unsigned long addr,
62108 unsigned long end, unsigned long vm_flags,
62109 - struct anon_vma *anon_vma, struct file *file,
62110 + struct anon_vma *anon_vma, struct file *file,
62111 pgoff_t pgoff, struct mempolicy *policy)
62112 {
62113 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
62114 struct vm_area_struct *area, *next;
62115 int err;
62116
62117 +#ifdef CONFIG_PAX_SEGMEXEC
62118 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
62119 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
62120 +
62121 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
62122 +#endif
62123 +
62124 /*
62125 * We later require that vma->vm_flags == vm_flags,
62126 * so this tests vma->vm_flags & VM_SPECIAL, too.
62127 @@ -802,6 +843,15 @@ struct vm_area_struct *vma_merge(struct
62128 if (next && next->vm_end == end) /* cases 6, 7, 8 */
62129 next = next->vm_next;
62130
62131 +#ifdef CONFIG_PAX_SEGMEXEC
62132 + if (prev)
62133 + prev_m = pax_find_mirror_vma(prev);
62134 + if (area)
62135 + area_m = pax_find_mirror_vma(area);
62136 + if (next)
62137 + next_m = pax_find_mirror_vma(next);
62138 +#endif
62139 +
62140 /*
62141 * Can it merge with the predecessor?
62142 */
62143 @@ -821,9 +871,24 @@ struct vm_area_struct *vma_merge(struct
62144 /* cases 1, 6 */
62145 err = vma_adjust(prev, prev->vm_start,
62146 next->vm_end, prev->vm_pgoff, NULL);
62147 - } else /* cases 2, 5, 7 */
62148 +
62149 +#ifdef CONFIG_PAX_SEGMEXEC
62150 + if (!err && prev_m)
62151 + err = vma_adjust(prev_m, prev_m->vm_start,
62152 + next_m->vm_end, prev_m->vm_pgoff, NULL);
62153 +#endif
62154 +
62155 + } else { /* cases 2, 5, 7 */
62156 err = vma_adjust(prev, prev->vm_start,
62157 end, prev->vm_pgoff, NULL);
62158 +
62159 +#ifdef CONFIG_PAX_SEGMEXEC
62160 + if (!err && prev_m)
62161 + err = vma_adjust(prev_m, prev_m->vm_start,
62162 + end_m, prev_m->vm_pgoff, NULL);
62163 +#endif
62164 +
62165 + }
62166 if (err)
62167 return NULL;
62168 khugepaged_enter_vma_merge(prev);
62169 @@ -837,12 +902,27 @@ struct vm_area_struct *vma_merge(struct
62170 mpol_equal(policy, vma_policy(next)) &&
62171 can_vma_merge_before(next, vm_flags,
62172 anon_vma, file, pgoff+pglen)) {
62173 - if (prev && addr < prev->vm_end) /* case 4 */
62174 + if (prev && addr < prev->vm_end) { /* case 4 */
62175 err = vma_adjust(prev, prev->vm_start,
62176 addr, prev->vm_pgoff, NULL);
62177 - else /* cases 3, 8 */
62178 +
62179 +#ifdef CONFIG_PAX_SEGMEXEC
62180 + if (!err && prev_m)
62181 + err = vma_adjust(prev_m, prev_m->vm_start,
62182 + addr_m, prev_m->vm_pgoff, NULL);
62183 +#endif
62184 +
62185 + } else { /* cases 3, 8 */
62186 err = vma_adjust(area, addr, next->vm_end,
62187 next->vm_pgoff - pglen, NULL);
62188 +
62189 +#ifdef CONFIG_PAX_SEGMEXEC
62190 + if (!err && area_m)
62191 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
62192 + next_m->vm_pgoff - pglen, NULL);
62193 +#endif
62194 +
62195 + }
62196 if (err)
62197 return NULL;
62198 khugepaged_enter_vma_merge(area);
62199 @@ -958,14 +1038,11 @@ none:
62200 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
62201 struct file *file, long pages)
62202 {
62203 - const unsigned long stack_flags
62204 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
62205 -
62206 if (file) {
62207 mm->shared_vm += pages;
62208 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
62209 mm->exec_vm += pages;
62210 - } else if (flags & stack_flags)
62211 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
62212 mm->stack_vm += pages;
62213 if (flags & (VM_RESERVED|VM_IO))
62214 mm->reserved_vm += pages;
62215 @@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
62216 * (the exception is when the underlying filesystem is noexec
62217 * mounted, in which case we dont add PROT_EXEC.)
62218 */
62219 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
62220 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
62221 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
62222 prot |= PROT_EXEC;
62223
62224 @@ -1018,7 +1095,7 @@ unsigned long do_mmap_pgoff(struct file
62225 /* Obtain the address to map to. we verify (or select) it and ensure
62226 * that it represents a valid section of the address space.
62227 */
62228 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
62229 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
62230 if (addr & ~PAGE_MASK)
62231 return addr;
62232
62233 @@ -1029,6 +1106,36 @@ unsigned long do_mmap_pgoff(struct file
62234 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
62235 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
62236
62237 +#ifdef CONFIG_PAX_MPROTECT
62238 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62239 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62240 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
62241 + gr_log_rwxmmap(file);
62242 +
62243 +#ifdef CONFIG_PAX_EMUPLT
62244 + vm_flags &= ~VM_EXEC;
62245 +#else
62246 + return -EPERM;
62247 +#endif
62248 +
62249 + }
62250 +
62251 + if (!(vm_flags & VM_EXEC))
62252 + vm_flags &= ~VM_MAYEXEC;
62253 +#else
62254 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62255 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62256 +#endif
62257 + else
62258 + vm_flags &= ~VM_MAYWRITE;
62259 + }
62260 +#endif
62261 +
62262 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62263 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
62264 + vm_flags &= ~VM_PAGEEXEC;
62265 +#endif
62266 +
62267 if (flags & MAP_LOCKED)
62268 if (!can_do_mlock())
62269 return -EPERM;
62270 @@ -1040,6 +1147,7 @@ unsigned long do_mmap_pgoff(struct file
62271 locked += mm->locked_vm;
62272 lock_limit = rlimit(RLIMIT_MEMLOCK);
62273 lock_limit >>= PAGE_SHIFT;
62274 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62275 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
62276 return -EAGAIN;
62277 }
62278 @@ -1110,6 +1218,9 @@ unsigned long do_mmap_pgoff(struct file
62279 if (error)
62280 return error;
62281
62282 + if (!gr_acl_handle_mmap(file, prot))
62283 + return -EACCES;
62284 +
62285 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
62286 }
62287 EXPORT_SYMBOL(do_mmap_pgoff);
62288 @@ -1187,10 +1298,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
62289 */
62290 int vma_wants_writenotify(struct vm_area_struct *vma)
62291 {
62292 - unsigned int vm_flags = vma->vm_flags;
62293 + unsigned long vm_flags = vma->vm_flags;
62294
62295 /* If it was private or non-writable, the write bit is already clear */
62296 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
62297 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
62298 return 0;
62299
62300 /* The backer wishes to know when pages are first written to? */
62301 @@ -1239,14 +1350,24 @@ unsigned long mmap_region(struct file *f
62302 unsigned long charged = 0;
62303 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
62304
62305 +#ifdef CONFIG_PAX_SEGMEXEC
62306 + struct vm_area_struct *vma_m = NULL;
62307 +#endif
62308 +
62309 + /*
62310 + * mm->mmap_sem is required to protect against another thread
62311 + * changing the mappings in case we sleep.
62312 + */
62313 + verify_mm_writelocked(mm);
62314 +
62315 /* Clear old maps */
62316 error = -ENOMEM;
62317 -munmap_back:
62318 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62319 if (vma && vma->vm_start < addr + len) {
62320 if (do_munmap(mm, addr, len))
62321 return -ENOMEM;
62322 - goto munmap_back;
62323 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62324 + BUG_ON(vma && vma->vm_start < addr + len);
62325 }
62326
62327 /* Check against address space limit. */
62328 @@ -1295,6 +1416,16 @@ munmap_back:
62329 goto unacct_error;
62330 }
62331
62332 +#ifdef CONFIG_PAX_SEGMEXEC
62333 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
62334 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62335 + if (!vma_m) {
62336 + error = -ENOMEM;
62337 + goto free_vma;
62338 + }
62339 + }
62340 +#endif
62341 +
62342 vma->vm_mm = mm;
62343 vma->vm_start = addr;
62344 vma->vm_end = addr + len;
62345 @@ -1318,6 +1449,19 @@ munmap_back:
62346 error = file->f_op->mmap(file, vma);
62347 if (error)
62348 goto unmap_and_free_vma;
62349 +
62350 +#ifdef CONFIG_PAX_SEGMEXEC
62351 + if (vma_m && (vm_flags & VM_EXECUTABLE))
62352 + added_exe_file_vma(mm);
62353 +#endif
62354 +
62355 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62356 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62357 + vma->vm_flags |= VM_PAGEEXEC;
62358 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62359 + }
62360 +#endif
62361 +
62362 if (vm_flags & VM_EXECUTABLE)
62363 added_exe_file_vma(mm);
62364
62365 @@ -1353,6 +1497,11 @@ munmap_back:
62366 vma_link(mm, vma, prev, rb_link, rb_parent);
62367 file = vma->vm_file;
62368
62369 +#ifdef CONFIG_PAX_SEGMEXEC
62370 + if (vma_m)
62371 + BUG_ON(pax_mirror_vma(vma_m, vma));
62372 +#endif
62373 +
62374 /* Once vma denies write, undo our temporary denial count */
62375 if (correct_wcount)
62376 atomic_inc(&inode->i_writecount);
62377 @@ -1361,6 +1510,7 @@ out:
62378
62379 mm->total_vm += len >> PAGE_SHIFT;
62380 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62381 + track_exec_limit(mm, addr, addr + len, vm_flags);
62382 if (vm_flags & VM_LOCKED) {
62383 if (!mlock_vma_pages_range(vma, addr, addr + len))
62384 mm->locked_vm += (len >> PAGE_SHIFT);
62385 @@ -1378,6 +1528,12 @@ unmap_and_free_vma:
62386 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62387 charged = 0;
62388 free_vma:
62389 +
62390 +#ifdef CONFIG_PAX_SEGMEXEC
62391 + if (vma_m)
62392 + kmem_cache_free(vm_area_cachep, vma_m);
62393 +#endif
62394 +
62395 kmem_cache_free(vm_area_cachep, vma);
62396 unacct_error:
62397 if (charged)
62398 @@ -1385,6 +1541,44 @@ unacct_error:
62399 return error;
62400 }
62401
62402 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62403 +{
62404 + if (!vma) {
62405 +#ifdef CONFIG_STACK_GROWSUP
62406 + if (addr > sysctl_heap_stack_gap)
62407 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62408 + else
62409 + vma = find_vma(current->mm, 0);
62410 + if (vma && (vma->vm_flags & VM_GROWSUP))
62411 + return false;
62412 +#endif
62413 + return true;
62414 + }
62415 +
62416 + if (addr + len > vma->vm_start)
62417 + return false;
62418 +
62419 + if (vma->vm_flags & VM_GROWSDOWN)
62420 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62421 +#ifdef CONFIG_STACK_GROWSUP
62422 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62423 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62424 +#endif
62425 +
62426 + return true;
62427 +}
62428 +
62429 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62430 +{
62431 + if (vma->vm_start < len)
62432 + return -ENOMEM;
62433 + if (!(vma->vm_flags & VM_GROWSDOWN))
62434 + return vma->vm_start - len;
62435 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
62436 + return vma->vm_start - len - sysctl_heap_stack_gap;
62437 + return -ENOMEM;
62438 +}
62439 +
62440 /* Get an address range which is currently unmapped.
62441 * For shmat() with addr=0.
62442 *
62443 @@ -1411,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp
62444 if (flags & MAP_FIXED)
62445 return addr;
62446
62447 +#ifdef CONFIG_PAX_RANDMMAP
62448 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62449 +#endif
62450 +
62451 if (addr) {
62452 addr = PAGE_ALIGN(addr);
62453 - vma = find_vma(mm, addr);
62454 - if (TASK_SIZE - len >= addr &&
62455 - (!vma || addr + len <= vma->vm_start))
62456 - return addr;
62457 + if (TASK_SIZE - len >= addr) {
62458 + vma = find_vma(mm, addr);
62459 + if (check_heap_stack_gap(vma, addr, len))
62460 + return addr;
62461 + }
62462 }
62463 if (len > mm->cached_hole_size) {
62464 - start_addr = addr = mm->free_area_cache;
62465 + start_addr = addr = mm->free_area_cache;
62466 } else {
62467 - start_addr = addr = TASK_UNMAPPED_BASE;
62468 - mm->cached_hole_size = 0;
62469 + start_addr = addr = mm->mmap_base;
62470 + mm->cached_hole_size = 0;
62471 }
62472
62473 full_search:
62474 @@ -1433,34 +1632,40 @@ full_search:
62475 * Start a new search - just in case we missed
62476 * some holes.
62477 */
62478 - if (start_addr != TASK_UNMAPPED_BASE) {
62479 - addr = TASK_UNMAPPED_BASE;
62480 - start_addr = addr;
62481 + if (start_addr != mm->mmap_base) {
62482 + start_addr = addr = mm->mmap_base;
62483 mm->cached_hole_size = 0;
62484 goto full_search;
62485 }
62486 return -ENOMEM;
62487 }
62488 - if (!vma || addr + len <= vma->vm_start) {
62489 - /*
62490 - * Remember the place where we stopped the search:
62491 - */
62492 - mm->free_area_cache = addr + len;
62493 - return addr;
62494 - }
62495 + if (check_heap_stack_gap(vma, addr, len))
62496 + break;
62497 if (addr + mm->cached_hole_size < vma->vm_start)
62498 mm->cached_hole_size = vma->vm_start - addr;
62499 addr = vma->vm_end;
62500 }
62501 +
62502 + /*
62503 + * Remember the place where we stopped the search:
62504 + */
62505 + mm->free_area_cache = addr + len;
62506 + return addr;
62507 }
62508 #endif
62509
62510 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62511 {
62512 +
62513 +#ifdef CONFIG_PAX_SEGMEXEC
62514 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62515 + return;
62516 +#endif
62517 +
62518 /*
62519 * Is this a new hole at the lowest possible address?
62520 */
62521 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62522 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62523 mm->free_area_cache = addr;
62524 mm->cached_hole_size = ~0UL;
62525 }
62526 @@ -1478,7 +1683,7 @@ arch_get_unmapped_area_topdown(struct fi
62527 {
62528 struct vm_area_struct *vma;
62529 struct mm_struct *mm = current->mm;
62530 - unsigned long addr = addr0;
62531 + unsigned long base = mm->mmap_base, addr = addr0;
62532
62533 /* requested length too big for entire address space */
62534 if (len > TASK_SIZE)
62535 @@ -1487,13 +1692,18 @@ arch_get_unmapped_area_topdown(struct fi
62536 if (flags & MAP_FIXED)
62537 return addr;
62538
62539 +#ifdef CONFIG_PAX_RANDMMAP
62540 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62541 +#endif
62542 +
62543 /* requesting a specific address */
62544 if (addr) {
62545 addr = PAGE_ALIGN(addr);
62546 - vma = find_vma(mm, addr);
62547 - if (TASK_SIZE - len >= addr &&
62548 - (!vma || addr + len <= vma->vm_start))
62549 - return addr;
62550 + if (TASK_SIZE - len >= addr) {
62551 + vma = find_vma(mm, addr);
62552 + if (check_heap_stack_gap(vma, addr, len))
62553 + return addr;
62554 + }
62555 }
62556
62557 /* check if free_area_cache is useful for us */
62558 @@ -1508,7 +1718,7 @@ arch_get_unmapped_area_topdown(struct fi
62559 /* make sure it can fit in the remaining address space */
62560 if (addr > len) {
62561 vma = find_vma(mm, addr-len);
62562 - if (!vma || addr <= vma->vm_start)
62563 + if (check_heap_stack_gap(vma, addr - len, len))
62564 /* remember the address as a hint for next time */
62565 return (mm->free_area_cache = addr-len);
62566 }
62567 @@ -1525,7 +1735,7 @@ arch_get_unmapped_area_topdown(struct fi
62568 * return with success:
62569 */
62570 vma = find_vma(mm, addr);
62571 - if (!vma || addr+len <= vma->vm_start)
62572 + if (check_heap_stack_gap(vma, addr, len))
62573 /* remember the address as a hint for next time */
62574 return (mm->free_area_cache = addr);
62575
62576 @@ -1534,8 +1744,8 @@ arch_get_unmapped_area_topdown(struct fi
62577 mm->cached_hole_size = vma->vm_start - addr;
62578
62579 /* try just below the current vma->vm_start */
62580 - addr = vma->vm_start-len;
62581 - } while (len < vma->vm_start);
62582 + addr = skip_heap_stack_gap(vma, len);
62583 + } while (!IS_ERR_VALUE(addr));
62584
62585 bottomup:
62586 /*
62587 @@ -1544,13 +1754,21 @@ bottomup:
62588 * can happen with large stack limits and large mmap()
62589 * allocations.
62590 */
62591 + mm->mmap_base = TASK_UNMAPPED_BASE;
62592 +
62593 +#ifdef CONFIG_PAX_RANDMMAP
62594 + if (mm->pax_flags & MF_PAX_RANDMMAP)
62595 + mm->mmap_base += mm->delta_mmap;
62596 +#endif
62597 +
62598 + mm->free_area_cache = mm->mmap_base;
62599 mm->cached_hole_size = ~0UL;
62600 - mm->free_area_cache = TASK_UNMAPPED_BASE;
62601 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62602 /*
62603 * Restore the topdown base:
62604 */
62605 - mm->free_area_cache = mm->mmap_base;
62606 + mm->mmap_base = base;
62607 + mm->free_area_cache = base;
62608 mm->cached_hole_size = ~0UL;
62609
62610 return addr;
62611 @@ -1559,6 +1777,12 @@ bottomup:
62612
62613 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62614 {
62615 +
62616 +#ifdef CONFIG_PAX_SEGMEXEC
62617 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62618 + return;
62619 +#endif
62620 +
62621 /*
62622 * Is this a new hole at the highest possible address?
62623 */
62624 @@ -1566,8 +1790,10 @@ void arch_unmap_area_topdown(struct mm_s
62625 mm->free_area_cache = addr;
62626
62627 /* dont allow allocations above current base */
62628 - if (mm->free_area_cache > mm->mmap_base)
62629 + if (mm->free_area_cache > mm->mmap_base) {
62630 mm->free_area_cache = mm->mmap_base;
62631 + mm->cached_hole_size = ~0UL;
62632 + }
62633 }
62634
62635 unsigned long
62636 @@ -1675,6 +1901,28 @@ out:
62637 return prev ? prev->vm_next : vma;
62638 }
62639
62640 +#ifdef CONFIG_PAX_SEGMEXEC
62641 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62642 +{
62643 + struct vm_area_struct *vma_m;
62644 +
62645 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62646 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62647 + BUG_ON(vma->vm_mirror);
62648 + return NULL;
62649 + }
62650 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62651 + vma_m = vma->vm_mirror;
62652 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62653 + BUG_ON(vma->vm_file != vma_m->vm_file);
62654 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62655 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62656 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62657 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62658 + return vma_m;
62659 +}
62660 +#endif
62661 +
62662 /*
62663 * Verify that the stack growth is acceptable and
62664 * update accounting. This is shared with both the
62665 @@ -1691,6 +1939,7 @@ static int acct_stack_growth(struct vm_a
62666 return -ENOMEM;
62667
62668 /* Stack limit test */
62669 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62670 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62671 return -ENOMEM;
62672
62673 @@ -1701,6 +1950,7 @@ static int acct_stack_growth(struct vm_a
62674 locked = mm->locked_vm + grow;
62675 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62676 limit >>= PAGE_SHIFT;
62677 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62678 if (locked > limit && !capable(CAP_IPC_LOCK))
62679 return -ENOMEM;
62680 }
62681 @@ -1731,37 +1981,48 @@ static int acct_stack_growth(struct vm_a
62682 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62683 * vma is the last one with address > vma->vm_end. Have to extend vma.
62684 */
62685 +#ifndef CONFIG_IA64
62686 +static
62687 +#endif
62688 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62689 {
62690 int error;
62691 + bool locknext;
62692
62693 if (!(vma->vm_flags & VM_GROWSUP))
62694 return -EFAULT;
62695
62696 + /* Also guard against wrapping around to address 0. */
62697 + if (address < PAGE_ALIGN(address+1))
62698 + address = PAGE_ALIGN(address+1);
62699 + else
62700 + return -ENOMEM;
62701 +
62702 /*
62703 * We must make sure the anon_vma is allocated
62704 * so that the anon_vma locking is not a noop.
62705 */
62706 if (unlikely(anon_vma_prepare(vma)))
62707 return -ENOMEM;
62708 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62709 + if (locknext && anon_vma_prepare(vma->vm_next))
62710 + return -ENOMEM;
62711 vma_lock_anon_vma(vma);
62712 + if (locknext)
62713 + vma_lock_anon_vma(vma->vm_next);
62714
62715 /*
62716 * vma->vm_start/vm_end cannot change under us because the caller
62717 * is required to hold the mmap_sem in read mode. We need the
62718 - * anon_vma lock to serialize against concurrent expand_stacks.
62719 - * Also guard against wrapping around to address 0.
62720 + * anon_vma locks to serialize against concurrent expand_stacks
62721 + * and expand_upwards.
62722 */
62723 - if (address < PAGE_ALIGN(address+4))
62724 - address = PAGE_ALIGN(address+4);
62725 - else {
62726 - vma_unlock_anon_vma(vma);
62727 - return -ENOMEM;
62728 - }
62729 error = 0;
62730
62731 /* Somebody else might have raced and expanded it already */
62732 - if (address > vma->vm_end) {
62733 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62734 + error = -ENOMEM;
62735 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62736 unsigned long size, grow;
62737
62738 size = address - vma->vm_start;
62739 @@ -1776,6 +2037,8 @@ int expand_upwards(struct vm_area_struct
62740 }
62741 }
62742 }
62743 + if (locknext)
62744 + vma_unlock_anon_vma(vma->vm_next);
62745 vma_unlock_anon_vma(vma);
62746 khugepaged_enter_vma_merge(vma);
62747 return error;
62748 @@ -1789,6 +2052,8 @@ static int expand_downwards(struct vm_ar
62749 unsigned long address)
62750 {
62751 int error;
62752 + bool lockprev = false;
62753 + struct vm_area_struct *prev;
62754
62755 /*
62756 * We must make sure the anon_vma is allocated
62757 @@ -1802,6 +2067,15 @@ static int expand_downwards(struct vm_ar
62758 if (error)
62759 return error;
62760
62761 + prev = vma->vm_prev;
62762 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62763 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62764 +#endif
62765 + if (lockprev && anon_vma_prepare(prev))
62766 + return -ENOMEM;
62767 + if (lockprev)
62768 + vma_lock_anon_vma(prev);
62769 +
62770 vma_lock_anon_vma(vma);
62771
62772 /*
62773 @@ -1811,9 +2085,17 @@ static int expand_downwards(struct vm_ar
62774 */
62775
62776 /* Somebody else might have raced and expanded it already */
62777 - if (address < vma->vm_start) {
62778 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62779 + error = -ENOMEM;
62780 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62781 unsigned long size, grow;
62782
62783 +#ifdef CONFIG_PAX_SEGMEXEC
62784 + struct vm_area_struct *vma_m;
62785 +
62786 + vma_m = pax_find_mirror_vma(vma);
62787 +#endif
62788 +
62789 size = vma->vm_end - address;
62790 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62791
62792 @@ -1823,11 +2105,22 @@ static int expand_downwards(struct vm_ar
62793 if (!error) {
62794 vma->vm_start = address;
62795 vma->vm_pgoff -= grow;
62796 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62797 +
62798 +#ifdef CONFIG_PAX_SEGMEXEC
62799 + if (vma_m) {
62800 + vma_m->vm_start -= grow << PAGE_SHIFT;
62801 + vma_m->vm_pgoff -= grow;
62802 + }
62803 +#endif
62804 +
62805 perf_event_mmap(vma);
62806 }
62807 }
62808 }
62809 vma_unlock_anon_vma(vma);
62810 + if (lockprev)
62811 + vma_unlock_anon_vma(prev);
62812 khugepaged_enter_vma_merge(vma);
62813 return error;
62814 }
62815 @@ -1902,6 +2195,13 @@ static void remove_vma_list(struct mm_st
62816 do {
62817 long nrpages = vma_pages(vma);
62818
62819 +#ifdef CONFIG_PAX_SEGMEXEC
62820 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62821 + vma = remove_vma(vma);
62822 + continue;
62823 + }
62824 +#endif
62825 +
62826 mm->total_vm -= nrpages;
62827 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62828 vma = remove_vma(vma);
62829 @@ -1947,6 +2247,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62830 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62831 vma->vm_prev = NULL;
62832 do {
62833 +
62834 +#ifdef CONFIG_PAX_SEGMEXEC
62835 + if (vma->vm_mirror) {
62836 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62837 + vma->vm_mirror->vm_mirror = NULL;
62838 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62839 + vma->vm_mirror = NULL;
62840 + }
62841 +#endif
62842 +
62843 rb_erase(&vma->vm_rb, &mm->mm_rb);
62844 mm->map_count--;
62845 tail_vma = vma;
62846 @@ -1975,14 +2285,33 @@ static int __split_vma(struct mm_struct
62847 struct vm_area_struct *new;
62848 int err = -ENOMEM;
62849
62850 +#ifdef CONFIG_PAX_SEGMEXEC
62851 + struct vm_area_struct *vma_m, *new_m = NULL;
62852 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62853 +#endif
62854 +
62855 if (is_vm_hugetlb_page(vma) && (addr &
62856 ~(huge_page_mask(hstate_vma(vma)))))
62857 return -EINVAL;
62858
62859 +#ifdef CONFIG_PAX_SEGMEXEC
62860 + vma_m = pax_find_mirror_vma(vma);
62861 +#endif
62862 +
62863 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62864 if (!new)
62865 goto out_err;
62866
62867 +#ifdef CONFIG_PAX_SEGMEXEC
62868 + if (vma_m) {
62869 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62870 + if (!new_m) {
62871 + kmem_cache_free(vm_area_cachep, new);
62872 + goto out_err;
62873 + }
62874 + }
62875 +#endif
62876 +
62877 /* most fields are the same, copy all, and then fixup */
62878 *new = *vma;
62879
62880 @@ -1995,6 +2324,22 @@ static int __split_vma(struct mm_struct
62881 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62882 }
62883
62884 +#ifdef CONFIG_PAX_SEGMEXEC
62885 + if (vma_m) {
62886 + *new_m = *vma_m;
62887 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62888 + new_m->vm_mirror = new;
62889 + new->vm_mirror = new_m;
62890 +
62891 + if (new_below)
62892 + new_m->vm_end = addr_m;
62893 + else {
62894 + new_m->vm_start = addr_m;
62895 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62896 + }
62897 + }
62898 +#endif
62899 +
62900 pol = mpol_dup(vma_policy(vma));
62901 if (IS_ERR(pol)) {
62902 err = PTR_ERR(pol);
62903 @@ -2020,6 +2365,42 @@ static int __split_vma(struct mm_struct
62904 else
62905 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62906
62907 +#ifdef CONFIG_PAX_SEGMEXEC
62908 + if (!err && vma_m) {
62909 + if (anon_vma_clone(new_m, vma_m))
62910 + goto out_free_mpol;
62911 +
62912 + mpol_get(pol);
62913 + vma_set_policy(new_m, pol);
62914 +
62915 + if (new_m->vm_file) {
62916 + get_file(new_m->vm_file);
62917 + if (vma_m->vm_flags & VM_EXECUTABLE)
62918 + added_exe_file_vma(mm);
62919 + }
62920 +
62921 + if (new_m->vm_ops && new_m->vm_ops->open)
62922 + new_m->vm_ops->open(new_m);
62923 +
62924 + if (new_below)
62925 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62926 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62927 + else
62928 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62929 +
62930 + if (err) {
62931 + if (new_m->vm_ops && new_m->vm_ops->close)
62932 + new_m->vm_ops->close(new_m);
62933 + if (new_m->vm_file) {
62934 + if (vma_m->vm_flags & VM_EXECUTABLE)
62935 + removed_exe_file_vma(mm);
62936 + fput(new_m->vm_file);
62937 + }
62938 + mpol_put(pol);
62939 + }
62940 + }
62941 +#endif
62942 +
62943 /* Success. */
62944 if (!err)
62945 return 0;
62946 @@ -2032,10 +2413,18 @@ static int __split_vma(struct mm_struct
62947 removed_exe_file_vma(mm);
62948 fput(new->vm_file);
62949 }
62950 - unlink_anon_vmas(new);
62951 out_free_mpol:
62952 mpol_put(pol);
62953 out_free_vma:
62954 +
62955 +#ifdef CONFIG_PAX_SEGMEXEC
62956 + if (new_m) {
62957 + unlink_anon_vmas(new_m);
62958 + kmem_cache_free(vm_area_cachep, new_m);
62959 + }
62960 +#endif
62961 +
62962 + unlink_anon_vmas(new);
62963 kmem_cache_free(vm_area_cachep, new);
62964 out_err:
62965 return err;
62966 @@ -2048,6 +2437,15 @@ static int __split_vma(struct mm_struct
62967 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62968 unsigned long addr, int new_below)
62969 {
62970 +
62971 +#ifdef CONFIG_PAX_SEGMEXEC
62972 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62973 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62974 + if (mm->map_count >= sysctl_max_map_count-1)
62975 + return -ENOMEM;
62976 + } else
62977 +#endif
62978 +
62979 if (mm->map_count >= sysctl_max_map_count)
62980 return -ENOMEM;
62981
62982 @@ -2059,11 +2457,30 @@ int split_vma(struct mm_struct *mm, stru
62983 * work. This now handles partial unmappings.
62984 * Jeremy Fitzhardinge <jeremy@goop.org>
62985 */
62986 +#ifdef CONFIG_PAX_SEGMEXEC
62987 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62988 {
62989 + int ret = __do_munmap(mm, start, len);
62990 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62991 + return ret;
62992 +
62993 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62994 +}
62995 +
62996 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62997 +#else
62998 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62999 +#endif
63000 +{
63001 unsigned long end;
63002 struct vm_area_struct *vma, *prev, *last;
63003
63004 + /*
63005 + * mm->mmap_sem is required to protect against another thread
63006 + * changing the mappings in case we sleep.
63007 + */
63008 + verify_mm_writelocked(mm);
63009 +
63010 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
63011 return -EINVAL;
63012
63013 @@ -2137,6 +2554,8 @@ int do_munmap(struct mm_struct *mm, unsi
63014 /* Fix up all other VM information */
63015 remove_vma_list(mm, vma);
63016
63017 + track_exec_limit(mm, start, end, 0UL);
63018 +
63019 return 0;
63020 }
63021
63022 @@ -2149,22 +2568,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
63023
63024 profile_munmap(addr);
63025
63026 +#ifdef CONFIG_PAX_SEGMEXEC
63027 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
63028 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
63029 + return -EINVAL;
63030 +#endif
63031 +
63032 down_write(&mm->mmap_sem);
63033 ret = do_munmap(mm, addr, len);
63034 up_write(&mm->mmap_sem);
63035 return ret;
63036 }
63037
63038 -static inline void verify_mm_writelocked(struct mm_struct *mm)
63039 -{
63040 -#ifdef CONFIG_DEBUG_VM
63041 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
63042 - WARN_ON(1);
63043 - up_read(&mm->mmap_sem);
63044 - }
63045 -#endif
63046 -}
63047 -
63048 /*
63049 * this is really a simplified "do_mmap". it only handles
63050 * anonymous maps. eventually we may be able to do some
63051 @@ -2178,6 +2593,7 @@ unsigned long do_brk(unsigned long addr,
63052 struct rb_node ** rb_link, * rb_parent;
63053 pgoff_t pgoff = addr >> PAGE_SHIFT;
63054 int error;
63055 + unsigned long charged;
63056
63057 len = PAGE_ALIGN(len);
63058 if (!len)
63059 @@ -2189,16 +2605,30 @@ unsigned long do_brk(unsigned long addr,
63060
63061 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
63062
63063 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
63064 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
63065 + flags &= ~VM_EXEC;
63066 +
63067 +#ifdef CONFIG_PAX_MPROTECT
63068 + if (mm->pax_flags & MF_PAX_MPROTECT)
63069 + flags &= ~VM_MAYEXEC;
63070 +#endif
63071 +
63072 + }
63073 +#endif
63074 +
63075 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
63076 if (error & ~PAGE_MASK)
63077 return error;
63078
63079 + charged = len >> PAGE_SHIFT;
63080 +
63081 /*
63082 * mlock MCL_FUTURE?
63083 */
63084 if (mm->def_flags & VM_LOCKED) {
63085 unsigned long locked, lock_limit;
63086 - locked = len >> PAGE_SHIFT;
63087 + locked = charged;
63088 locked += mm->locked_vm;
63089 lock_limit = rlimit(RLIMIT_MEMLOCK);
63090 lock_limit >>= PAGE_SHIFT;
63091 @@ -2215,22 +2645,22 @@ unsigned long do_brk(unsigned long addr,
63092 /*
63093 * Clear old maps. this also does some error checking for us
63094 */
63095 - munmap_back:
63096 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63097 if (vma && vma->vm_start < addr + len) {
63098 if (do_munmap(mm, addr, len))
63099 return -ENOMEM;
63100 - goto munmap_back;
63101 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63102 + BUG_ON(vma && vma->vm_start < addr + len);
63103 }
63104
63105 /* Check against address space limits *after* clearing old maps... */
63106 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
63107 + if (!may_expand_vm(mm, charged))
63108 return -ENOMEM;
63109
63110 if (mm->map_count > sysctl_max_map_count)
63111 return -ENOMEM;
63112
63113 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
63114 + if (security_vm_enough_memory(charged))
63115 return -ENOMEM;
63116
63117 /* Can we just expand an old private anonymous mapping? */
63118 @@ -2244,7 +2674,7 @@ unsigned long do_brk(unsigned long addr,
63119 */
63120 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63121 if (!vma) {
63122 - vm_unacct_memory(len >> PAGE_SHIFT);
63123 + vm_unacct_memory(charged);
63124 return -ENOMEM;
63125 }
63126
63127 @@ -2258,11 +2688,12 @@ unsigned long do_brk(unsigned long addr,
63128 vma_link(mm, vma, prev, rb_link, rb_parent);
63129 out:
63130 perf_event_mmap(vma);
63131 - mm->total_vm += len >> PAGE_SHIFT;
63132 + mm->total_vm += charged;
63133 if (flags & VM_LOCKED) {
63134 if (!mlock_vma_pages_range(vma, addr, addr + len))
63135 - mm->locked_vm += (len >> PAGE_SHIFT);
63136 + mm->locked_vm += charged;
63137 }
63138 + track_exec_limit(mm, addr, addr + len, flags);
63139 return addr;
63140 }
63141
63142 @@ -2309,8 +2740,10 @@ void exit_mmap(struct mm_struct *mm)
63143 * Walk the list again, actually closing and freeing it,
63144 * with preemption enabled, without holding any MM locks.
63145 */
63146 - while (vma)
63147 + while (vma) {
63148 + vma->vm_mirror = NULL;
63149 vma = remove_vma(vma);
63150 + }
63151
63152 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
63153 }
63154 @@ -2324,6 +2757,13 @@ int insert_vm_struct(struct mm_struct *
63155 struct vm_area_struct * __vma, * prev;
63156 struct rb_node ** rb_link, * rb_parent;
63157
63158 +#ifdef CONFIG_PAX_SEGMEXEC
63159 + struct vm_area_struct *vma_m = NULL;
63160 +#endif
63161 +
63162 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
63163 + return -EPERM;
63164 +
63165 /*
63166 * The vm_pgoff of a purely anonymous vma should be irrelevant
63167 * until its first write fault, when page's anon_vma and index
63168 @@ -2346,7 +2786,22 @@ int insert_vm_struct(struct mm_struct *
63169 if ((vma->vm_flags & VM_ACCOUNT) &&
63170 security_vm_enough_memory_mm(mm, vma_pages(vma)))
63171 return -ENOMEM;
63172 +
63173 +#ifdef CONFIG_PAX_SEGMEXEC
63174 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
63175 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63176 + if (!vma_m)
63177 + return -ENOMEM;
63178 + }
63179 +#endif
63180 +
63181 vma_link(mm, vma, prev, rb_link, rb_parent);
63182 +
63183 +#ifdef CONFIG_PAX_SEGMEXEC
63184 + if (vma_m)
63185 + BUG_ON(pax_mirror_vma(vma_m, vma));
63186 +#endif
63187 +
63188 return 0;
63189 }
63190
63191 @@ -2364,6 +2819,8 @@ struct vm_area_struct *copy_vma(struct v
63192 struct rb_node **rb_link, *rb_parent;
63193 struct mempolicy *pol;
63194
63195 + BUG_ON(vma->vm_mirror);
63196 +
63197 /*
63198 * If anonymous vma has not yet been faulted, update new pgoff
63199 * to match new location, to increase its chance of merging.
63200 @@ -2414,6 +2871,39 @@ struct vm_area_struct *copy_vma(struct v
63201 return NULL;
63202 }
63203
63204 +#ifdef CONFIG_PAX_SEGMEXEC
63205 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
63206 +{
63207 + struct vm_area_struct *prev_m;
63208 + struct rb_node **rb_link_m, *rb_parent_m;
63209 + struct mempolicy *pol_m;
63210 +
63211 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
63212 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
63213 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
63214 + *vma_m = *vma;
63215 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
63216 + if (anon_vma_clone(vma_m, vma))
63217 + return -ENOMEM;
63218 + pol_m = vma_policy(vma_m);
63219 + mpol_get(pol_m);
63220 + vma_set_policy(vma_m, pol_m);
63221 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
63222 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
63223 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
63224 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
63225 + if (vma_m->vm_file)
63226 + get_file(vma_m->vm_file);
63227 + if (vma_m->vm_ops && vma_m->vm_ops->open)
63228 + vma_m->vm_ops->open(vma_m);
63229 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
63230 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
63231 + vma_m->vm_mirror = vma;
63232 + vma->vm_mirror = vma_m;
63233 + return 0;
63234 +}
63235 +#endif
63236 +
63237 /*
63238 * Return true if the calling process may expand its vm space by the passed
63239 * number of pages
63240 @@ -2424,7 +2914,7 @@ int may_expand_vm(struct mm_struct *mm,
63241 unsigned long lim;
63242
63243 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
63244 -
63245 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
63246 if (cur + npages > lim)
63247 return 0;
63248 return 1;
63249 @@ -2495,6 +2985,22 @@ int install_special_mapping(struct mm_st
63250 vma->vm_start = addr;
63251 vma->vm_end = addr + len;
63252
63253 +#ifdef CONFIG_PAX_MPROTECT
63254 + if (mm->pax_flags & MF_PAX_MPROTECT) {
63255 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
63256 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
63257 + return -EPERM;
63258 + if (!(vm_flags & VM_EXEC))
63259 + vm_flags &= ~VM_MAYEXEC;
63260 +#else
63261 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
63262 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
63263 +#endif
63264 + else
63265 + vm_flags &= ~VM_MAYWRITE;
63266 + }
63267 +#endif
63268 +
63269 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
63270 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
63271
63272 diff -urNp linux-2.6.39.4/mm/mprotect.c linux-2.6.39.4/mm/mprotect.c
63273 --- linux-2.6.39.4/mm/mprotect.c 2011-05-19 00:06:34.000000000 -0400
63274 +++ linux-2.6.39.4/mm/mprotect.c 2011-08-05 19:44:37.000000000 -0400
63275 @@ -23,10 +23,16 @@
63276 #include <linux/mmu_notifier.h>
63277 #include <linux/migrate.h>
63278 #include <linux/perf_event.h>
63279 +
63280 +#ifdef CONFIG_PAX_MPROTECT
63281 +#include <linux/elf.h>
63282 +#endif
63283 +
63284 #include <asm/uaccess.h>
63285 #include <asm/pgtable.h>
63286 #include <asm/cacheflush.h>
63287 #include <asm/tlbflush.h>
63288 +#include <asm/mmu_context.h>
63289
63290 #ifndef pgprot_modify
63291 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
63292 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
63293 flush_tlb_range(vma, start, end);
63294 }
63295
63296 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63297 +/* called while holding the mmap semaphor for writing except stack expansion */
63298 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
63299 +{
63300 + unsigned long oldlimit, newlimit = 0UL;
63301 +
63302 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
63303 + return;
63304 +
63305 + spin_lock(&mm->page_table_lock);
63306 + oldlimit = mm->context.user_cs_limit;
63307 + if ((prot & VM_EXEC) && oldlimit < end)
63308 + /* USER_CS limit moved up */
63309 + newlimit = end;
63310 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
63311 + /* USER_CS limit moved down */
63312 + newlimit = start;
63313 +
63314 + if (newlimit) {
63315 + mm->context.user_cs_limit = newlimit;
63316 +
63317 +#ifdef CONFIG_SMP
63318 + wmb();
63319 + cpus_clear(mm->context.cpu_user_cs_mask);
63320 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
63321 +#endif
63322 +
63323 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
63324 + }
63325 + spin_unlock(&mm->page_table_lock);
63326 + if (newlimit == end) {
63327 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
63328 +
63329 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
63330 + if (is_vm_hugetlb_page(vma))
63331 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
63332 + else
63333 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
63334 + }
63335 +}
63336 +#endif
63337 +
63338 int
63339 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
63340 unsigned long start, unsigned long end, unsigned long newflags)
63341 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
63342 int error;
63343 int dirty_accountable = 0;
63344
63345 +#ifdef CONFIG_PAX_SEGMEXEC
63346 + struct vm_area_struct *vma_m = NULL;
63347 + unsigned long start_m, end_m;
63348 +
63349 + start_m = start + SEGMEXEC_TASK_SIZE;
63350 + end_m = end + SEGMEXEC_TASK_SIZE;
63351 +#endif
63352 +
63353 if (newflags == oldflags) {
63354 *pprev = vma;
63355 return 0;
63356 }
63357
63358 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63359 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63360 +
63361 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63362 + return -ENOMEM;
63363 +
63364 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63365 + return -ENOMEM;
63366 + }
63367 +
63368 /*
63369 * If we make a private mapping writable we increase our commit;
63370 * but (without finer accounting) cannot reduce our commit if we
63371 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63372 }
63373 }
63374
63375 +#ifdef CONFIG_PAX_SEGMEXEC
63376 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63377 + if (start != vma->vm_start) {
63378 + error = split_vma(mm, vma, start, 1);
63379 + if (error)
63380 + goto fail;
63381 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63382 + *pprev = (*pprev)->vm_next;
63383 + }
63384 +
63385 + if (end != vma->vm_end) {
63386 + error = split_vma(mm, vma, end, 0);
63387 + if (error)
63388 + goto fail;
63389 + }
63390 +
63391 + if (pax_find_mirror_vma(vma)) {
63392 + error = __do_munmap(mm, start_m, end_m - start_m);
63393 + if (error)
63394 + goto fail;
63395 + } else {
63396 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63397 + if (!vma_m) {
63398 + error = -ENOMEM;
63399 + goto fail;
63400 + }
63401 + vma->vm_flags = newflags;
63402 + error = pax_mirror_vma(vma_m, vma);
63403 + if (error) {
63404 + vma->vm_flags = oldflags;
63405 + goto fail;
63406 + }
63407 + }
63408 + }
63409 +#endif
63410 +
63411 /*
63412 * First try to merge with previous and/or next vma.
63413 */
63414 @@ -204,9 +306,21 @@ success:
63415 * vm_flags and vm_page_prot are protected by the mmap_sem
63416 * held in write mode.
63417 */
63418 +
63419 +#ifdef CONFIG_PAX_SEGMEXEC
63420 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63421 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63422 +#endif
63423 +
63424 vma->vm_flags = newflags;
63425 +
63426 +#ifdef CONFIG_PAX_MPROTECT
63427 + if (mm->binfmt && mm->binfmt->handle_mprotect)
63428 + mm->binfmt->handle_mprotect(vma, newflags);
63429 +#endif
63430 +
63431 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63432 - vm_get_page_prot(newflags));
63433 + vm_get_page_prot(vma->vm_flags));
63434
63435 if (vma_wants_writenotify(vma)) {
63436 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63437 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63438 end = start + len;
63439 if (end <= start)
63440 return -ENOMEM;
63441 +
63442 +#ifdef CONFIG_PAX_SEGMEXEC
63443 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63444 + if (end > SEGMEXEC_TASK_SIZE)
63445 + return -EINVAL;
63446 + } else
63447 +#endif
63448 +
63449 + if (end > TASK_SIZE)
63450 + return -EINVAL;
63451 +
63452 if (!arch_validate_prot(prot))
63453 return -EINVAL;
63454
63455 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63456 /*
63457 * Does the application expect PROT_READ to imply PROT_EXEC:
63458 */
63459 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63460 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63461 prot |= PROT_EXEC;
63462
63463 vm_flags = calc_vm_prot_bits(prot);
63464 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63465 if (start > vma->vm_start)
63466 prev = vma;
63467
63468 +#ifdef CONFIG_PAX_MPROTECT
63469 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63470 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
63471 +#endif
63472 +
63473 for (nstart = start ; ; ) {
63474 unsigned long newflags;
63475
63476 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63477
63478 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63479 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63480 + if (prot & (PROT_WRITE | PROT_EXEC))
63481 + gr_log_rwxmprotect(vma->vm_file);
63482 +
63483 + error = -EACCES;
63484 + goto out;
63485 + }
63486 +
63487 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63488 error = -EACCES;
63489 goto out;
63490 }
63491 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63492 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63493 if (error)
63494 goto out;
63495 +
63496 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
63497 +
63498 nstart = tmp;
63499
63500 if (nstart < prev->vm_end)
63501 diff -urNp linux-2.6.39.4/mm/mremap.c linux-2.6.39.4/mm/mremap.c
63502 --- linux-2.6.39.4/mm/mremap.c 2011-05-19 00:06:34.000000000 -0400
63503 +++ linux-2.6.39.4/mm/mremap.c 2011-08-05 19:44:37.000000000 -0400
63504 @@ -114,6 +114,12 @@ static void move_ptes(struct vm_area_str
63505 continue;
63506 pte = ptep_clear_flush(vma, old_addr, old_pte);
63507 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63508 +
63509 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63510 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63511 + pte = pte_exprotect(pte);
63512 +#endif
63513 +
63514 set_pte_at(mm, new_addr, new_pte, pte);
63515 }
63516
63517 @@ -273,6 +279,11 @@ static struct vm_area_struct *vma_to_res
63518 if (is_vm_hugetlb_page(vma))
63519 goto Einval;
63520
63521 +#ifdef CONFIG_PAX_SEGMEXEC
63522 + if (pax_find_mirror_vma(vma))
63523 + goto Einval;
63524 +#endif
63525 +
63526 /* We can't remap across vm area boundaries */
63527 if (old_len > vma->vm_end - addr)
63528 goto Efault;
63529 @@ -329,20 +340,25 @@ static unsigned long mremap_to(unsigned
63530 unsigned long ret = -EINVAL;
63531 unsigned long charged = 0;
63532 unsigned long map_flags;
63533 + unsigned long pax_task_size = TASK_SIZE;
63534
63535 if (new_addr & ~PAGE_MASK)
63536 goto out;
63537
63538 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63539 +#ifdef CONFIG_PAX_SEGMEXEC
63540 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63541 + pax_task_size = SEGMEXEC_TASK_SIZE;
63542 +#endif
63543 +
63544 + pax_task_size -= PAGE_SIZE;
63545 +
63546 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63547 goto out;
63548
63549 /* Check if the location we're moving into overlaps the
63550 * old location at all, and fail if it does.
63551 */
63552 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
63553 - goto out;
63554 -
63555 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
63556 + if (addr + old_len > new_addr && new_addr + new_len > addr)
63557 goto out;
63558
63559 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63560 @@ -414,6 +430,7 @@ unsigned long do_mremap(unsigned long ad
63561 struct vm_area_struct *vma;
63562 unsigned long ret = -EINVAL;
63563 unsigned long charged = 0;
63564 + unsigned long pax_task_size = TASK_SIZE;
63565
63566 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63567 goto out;
63568 @@ -432,6 +449,17 @@ unsigned long do_mremap(unsigned long ad
63569 if (!new_len)
63570 goto out;
63571
63572 +#ifdef CONFIG_PAX_SEGMEXEC
63573 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63574 + pax_task_size = SEGMEXEC_TASK_SIZE;
63575 +#endif
63576 +
63577 + pax_task_size -= PAGE_SIZE;
63578 +
63579 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63580 + old_len > pax_task_size || addr > pax_task_size-old_len)
63581 + goto out;
63582 +
63583 if (flags & MREMAP_FIXED) {
63584 if (flags & MREMAP_MAYMOVE)
63585 ret = mremap_to(addr, old_len, new_addr, new_len);
63586 @@ -481,6 +509,7 @@ unsigned long do_mremap(unsigned long ad
63587 addr + new_len);
63588 }
63589 ret = addr;
63590 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63591 goto out;
63592 }
63593 }
63594 @@ -507,7 +536,13 @@ unsigned long do_mremap(unsigned long ad
63595 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63596 if (ret)
63597 goto out;
63598 +
63599 + map_flags = vma->vm_flags;
63600 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63601 + if (!(ret & ~PAGE_MASK)) {
63602 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63603 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63604 + }
63605 }
63606 out:
63607 if (ret & ~PAGE_MASK)
63608 diff -urNp linux-2.6.39.4/mm/nobootmem.c linux-2.6.39.4/mm/nobootmem.c
63609 --- linux-2.6.39.4/mm/nobootmem.c 2011-05-19 00:06:34.000000000 -0400
63610 +++ linux-2.6.39.4/mm/nobootmem.c 2011-08-05 19:44:37.000000000 -0400
63611 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63612 unsigned long __init free_all_memory_core_early(int nodeid)
63613 {
63614 int i;
63615 - u64 start, end;
63616 + u64 start, end, startrange, endrange;
63617 unsigned long count = 0;
63618 - struct range *range = NULL;
63619 + struct range *range = NULL, rangerange = { 0, 0 };
63620 int nr_range;
63621
63622 nr_range = get_free_all_memory_range(&range, nodeid);
63623 + startrange = __pa(range) >> PAGE_SHIFT;
63624 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63625
63626 for (i = 0; i < nr_range; i++) {
63627 start = range[i].start;
63628 end = range[i].end;
63629 + if (start <= endrange && startrange < end) {
63630 + BUG_ON(rangerange.start | rangerange.end);
63631 + rangerange = range[i];
63632 + continue;
63633 + }
63634 count += end - start;
63635 __free_pages_memory(start, end);
63636 }
63637 + start = rangerange.start;
63638 + end = rangerange.end;
63639 + count += end - start;
63640 + __free_pages_memory(start, end);
63641
63642 return count;
63643 }
63644 diff -urNp linux-2.6.39.4/mm/nommu.c linux-2.6.39.4/mm/nommu.c
63645 --- linux-2.6.39.4/mm/nommu.c 2011-08-05 21:11:51.000000000 -0400
63646 +++ linux-2.6.39.4/mm/nommu.c 2011-08-05 21:12:20.000000000 -0400
63647 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63648 int sysctl_overcommit_ratio = 50; /* default is 50% */
63649 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63650 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63651 -int heap_stack_gap = 0;
63652
63653 atomic_long_t mmap_pages_allocated;
63654
63655 @@ -833,15 +832,6 @@ struct vm_area_struct *find_vma(struct m
63656 EXPORT_SYMBOL(find_vma);
63657
63658 /*
63659 - * find a VMA
63660 - * - we don't extend stack VMAs under NOMMU conditions
63661 - */
63662 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63663 -{
63664 - return find_vma(mm, addr);
63665 -}
63666 -
63667 -/*
63668 * expand a stack to a given address
63669 * - not supported under NOMMU conditions
63670 */
63671 @@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
63672
63673 /* most fields are the same, copy all, and then fixup */
63674 *new = *vma;
63675 + INIT_LIST_HEAD(&new->anon_vma_chain);
63676 *region = *vma->vm_region;
63677 new->vm_region = region;
63678
63679 diff -urNp linux-2.6.39.4/mm/page_alloc.c linux-2.6.39.4/mm/page_alloc.c
63680 --- linux-2.6.39.4/mm/page_alloc.c 2011-06-03 00:04:14.000000000 -0400
63681 +++ linux-2.6.39.4/mm/page_alloc.c 2011-08-05 19:44:37.000000000 -0400
63682 @@ -337,7 +337,7 @@ out:
63683 * This usage means that zero-order pages may not be compound.
63684 */
63685
63686 -static void free_compound_page(struct page *page)
63687 +void free_compound_page(struct page *page)
63688 {
63689 __free_pages_ok(page, compound_order(page));
63690 }
63691 @@ -650,6 +650,10 @@ static bool free_pages_prepare(struct pa
63692 int i;
63693 int bad = 0;
63694
63695 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63696 + unsigned long index = 1UL << order;
63697 +#endif
63698 +
63699 trace_mm_page_free_direct(page, order);
63700 kmemcheck_free_shadow(page, order);
63701
63702 @@ -665,6 +669,12 @@ static bool free_pages_prepare(struct pa
63703 debug_check_no_obj_freed(page_address(page),
63704 PAGE_SIZE << order);
63705 }
63706 +
63707 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63708 + for (; index; --index)
63709 + sanitize_highpage(page + index - 1);
63710 +#endif
63711 +
63712 arch_free_page(page, order);
63713 kernel_map_pages(page, 1 << order, 0);
63714
63715 @@ -780,8 +790,10 @@ static int prep_new_page(struct page *pa
63716 arch_alloc_page(page, order);
63717 kernel_map_pages(page, 1 << order, 1);
63718
63719 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63720 if (gfp_flags & __GFP_ZERO)
63721 prep_zero_page(page, order, gfp_flags);
63722 +#endif
63723
63724 if (order && (gfp_flags & __GFP_COMP))
63725 prep_compound_page(page, order);
63726 @@ -2504,6 +2516,8 @@ void __show_free_areas(unsigned int filt
63727 int cpu;
63728 struct zone *zone;
63729
63730 + pax_track_stack();
63731 +
63732 for_each_populated_zone(zone) {
63733 if (skip_free_areas_zone(filter, zone))
63734 continue;
63735 diff -urNp linux-2.6.39.4/mm/percpu.c linux-2.6.39.4/mm/percpu.c
63736 --- linux-2.6.39.4/mm/percpu.c 2011-05-19 00:06:34.000000000 -0400
63737 +++ linux-2.6.39.4/mm/percpu.c 2011-08-05 19:44:37.000000000 -0400
63738 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63739 static unsigned int pcpu_last_unit_cpu __read_mostly;
63740
63741 /* the address of the first chunk which starts with the kernel static area */
63742 -void *pcpu_base_addr __read_mostly;
63743 +void *pcpu_base_addr __read_only;
63744 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63745
63746 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63747 diff -urNp linux-2.6.39.4/mm/rmap.c linux-2.6.39.4/mm/rmap.c
63748 --- linux-2.6.39.4/mm/rmap.c 2011-05-19 00:06:34.000000000 -0400
63749 +++ linux-2.6.39.4/mm/rmap.c 2011-08-05 19:44:37.000000000 -0400
63750 @@ -131,6 +131,10 @@ int anon_vma_prepare(struct vm_area_stru
63751 struct anon_vma *anon_vma = vma->anon_vma;
63752 struct anon_vma_chain *avc;
63753
63754 +#ifdef CONFIG_PAX_SEGMEXEC
63755 + struct anon_vma_chain *avc_m = NULL;
63756 +#endif
63757 +
63758 might_sleep();
63759 if (unlikely(!anon_vma)) {
63760 struct mm_struct *mm = vma->vm_mm;
63761 @@ -140,6 +144,12 @@ int anon_vma_prepare(struct vm_area_stru
63762 if (!avc)
63763 goto out_enomem;
63764
63765 +#ifdef CONFIG_PAX_SEGMEXEC
63766 + avc_m = anon_vma_chain_alloc();
63767 + if (!avc_m)
63768 + goto out_enomem_free_avc;
63769 +#endif
63770 +
63771 anon_vma = find_mergeable_anon_vma(vma);
63772 allocated = NULL;
63773 if (!anon_vma) {
63774 @@ -153,6 +163,21 @@ int anon_vma_prepare(struct vm_area_stru
63775 /* page_table_lock to protect against threads */
63776 spin_lock(&mm->page_table_lock);
63777 if (likely(!vma->anon_vma)) {
63778 +
63779 +#ifdef CONFIG_PAX_SEGMEXEC
63780 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63781 +
63782 + if (vma_m) {
63783 + BUG_ON(vma_m->anon_vma);
63784 + vma_m->anon_vma = anon_vma;
63785 + avc_m->anon_vma = anon_vma;
63786 + avc_m->vma = vma;
63787 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63788 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63789 + avc_m = NULL;
63790 + }
63791 +#endif
63792 +
63793 vma->anon_vma = anon_vma;
63794 avc->anon_vma = anon_vma;
63795 avc->vma = vma;
63796 @@ -166,12 +191,24 @@ int anon_vma_prepare(struct vm_area_stru
63797
63798 if (unlikely(allocated))
63799 put_anon_vma(allocated);
63800 +
63801 +#ifdef CONFIG_PAX_SEGMEXEC
63802 + if (unlikely(avc_m))
63803 + anon_vma_chain_free(avc_m);
63804 +#endif
63805 +
63806 if (unlikely(avc))
63807 anon_vma_chain_free(avc);
63808 }
63809 return 0;
63810
63811 out_enomem_free_avc:
63812 +
63813 +#ifdef CONFIG_PAX_SEGMEXEC
63814 + if (avc_m)
63815 + anon_vma_chain_free(avc_m);
63816 +#endif
63817 +
63818 anon_vma_chain_free(avc);
63819 out_enomem:
63820 return -ENOMEM;
63821 @@ -198,7 +235,7 @@ static void anon_vma_chain_link(struct v
63822 * Attach the anon_vmas from src to dst.
63823 * Returns 0 on success, -ENOMEM on failure.
63824 */
63825 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63826 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63827 {
63828 struct anon_vma_chain *avc, *pavc;
63829
63830 @@ -220,7 +257,7 @@ int anon_vma_clone(struct vm_area_struct
63831 * the corresponding VMA in the parent process is attached to.
63832 * Returns 0 on success, non-zero on failure.
63833 */
63834 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63835 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63836 {
63837 struct anon_vma_chain *avc;
63838 struct anon_vma *anon_vma;
63839 diff -urNp linux-2.6.39.4/mm/shmem.c linux-2.6.39.4/mm/shmem.c
63840 --- linux-2.6.39.4/mm/shmem.c 2011-06-03 00:04:14.000000000 -0400
63841 +++ linux-2.6.39.4/mm/shmem.c 2011-08-05 19:44:37.000000000 -0400
63842 @@ -31,7 +31,7 @@
63843 #include <linux/percpu_counter.h>
63844 #include <linux/swap.h>
63845
63846 -static struct vfsmount *shm_mnt;
63847 +struct vfsmount *shm_mnt;
63848
63849 #ifdef CONFIG_SHMEM
63850 /*
63851 @@ -1087,6 +1087,8 @@ static int shmem_writepage(struct page *
63852 goto unlock;
63853 }
63854 entry = shmem_swp_entry(info, index, NULL);
63855 + if (!entry)
63856 + goto unlock;
63857 if (entry->val) {
63858 /*
63859 * The more uptodate page coming down from a stacked
63860 @@ -1158,6 +1160,8 @@ static struct page *shmem_swapin(swp_ent
63861 struct vm_area_struct pvma;
63862 struct page *page;
63863
63864 + pax_track_stack();
63865 +
63866 spol = mpol_cond_copy(&mpol,
63867 mpol_shared_policy_lookup(&info->policy, idx));
63868
63869 @@ -2014,7 +2018,7 @@ static int shmem_symlink(struct inode *d
63870
63871 info = SHMEM_I(inode);
63872 inode->i_size = len-1;
63873 - if (len <= (char *)inode - (char *)info) {
63874 + if (len <= (char *)inode - (char *)info && len <= 64) {
63875 /* do it inline */
63876 memcpy(info, symname, len);
63877 inode->i_op = &shmem_symlink_inline_operations;
63878 @@ -2362,8 +2366,7 @@ int shmem_fill_super(struct super_block
63879 int err = -ENOMEM;
63880
63881 /* Round up to L1_CACHE_BYTES to resist false sharing */
63882 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63883 - L1_CACHE_BYTES), GFP_KERNEL);
63884 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63885 if (!sbinfo)
63886 return -ENOMEM;
63887
63888 diff -urNp linux-2.6.39.4/mm/slab.c linux-2.6.39.4/mm/slab.c
63889 --- linux-2.6.39.4/mm/slab.c 2011-05-19 00:06:34.000000000 -0400
63890 +++ linux-2.6.39.4/mm/slab.c 2011-08-05 19:44:37.000000000 -0400
63891 @@ -150,7 +150,7 @@
63892
63893 /* Legal flag mask for kmem_cache_create(). */
63894 #if DEBUG
63895 -# define CREATE_MASK (SLAB_RED_ZONE | \
63896 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63897 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63898 SLAB_CACHE_DMA | \
63899 SLAB_STORE_USER | \
63900 @@ -158,7 +158,7 @@
63901 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63902 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63903 #else
63904 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63905 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63906 SLAB_CACHE_DMA | \
63907 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63908 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63909 @@ -287,7 +287,7 @@ struct kmem_list3 {
63910 * Need this for bootstrapping a per node allocator.
63911 */
63912 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63913 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63914 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63915 #define CACHE_CACHE 0
63916 #define SIZE_AC MAX_NUMNODES
63917 #define SIZE_L3 (2 * MAX_NUMNODES)
63918 @@ -388,10 +388,10 @@ static void kmem_list3_init(struct kmem_
63919 if ((x)->max_freeable < i) \
63920 (x)->max_freeable = i; \
63921 } while (0)
63922 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63923 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63924 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63925 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63926 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63927 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63928 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63929 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63930 #else
63931 #define STATS_INC_ACTIVE(x) do { } while (0)
63932 #define STATS_DEC_ACTIVE(x) do { } while (0)
63933 @@ -537,7 +537,7 @@ static inline void *index_to_obj(struct
63934 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63935 */
63936 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63937 - const struct slab *slab, void *obj)
63938 + const struct slab *slab, const void *obj)
63939 {
63940 u32 offset = (obj - slab->s_mem);
63941 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63942 @@ -563,7 +563,7 @@ struct cache_names {
63943 static struct cache_names __initdata cache_names[] = {
63944 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63945 #include <linux/kmalloc_sizes.h>
63946 - {NULL,}
63947 + {NULL}
63948 #undef CACHE
63949 };
63950
63951 @@ -1529,7 +1529,7 @@ void __init kmem_cache_init(void)
63952 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63953 sizes[INDEX_AC].cs_size,
63954 ARCH_KMALLOC_MINALIGN,
63955 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63956 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63957 NULL);
63958
63959 if (INDEX_AC != INDEX_L3) {
63960 @@ -1537,7 +1537,7 @@ void __init kmem_cache_init(void)
63961 kmem_cache_create(names[INDEX_L3].name,
63962 sizes[INDEX_L3].cs_size,
63963 ARCH_KMALLOC_MINALIGN,
63964 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63965 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63966 NULL);
63967 }
63968
63969 @@ -1555,7 +1555,7 @@ void __init kmem_cache_init(void)
63970 sizes->cs_cachep = kmem_cache_create(names->name,
63971 sizes->cs_size,
63972 ARCH_KMALLOC_MINALIGN,
63973 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63974 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63975 NULL);
63976 }
63977 #ifdef CONFIG_ZONE_DMA
63978 @@ -4270,10 +4270,10 @@ static int s_show(struct seq_file *m, vo
63979 }
63980 /* cpu stats */
63981 {
63982 - unsigned long allochit = atomic_read(&cachep->allochit);
63983 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63984 - unsigned long freehit = atomic_read(&cachep->freehit);
63985 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63986 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63987 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63988 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63989 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63990
63991 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63992 allochit, allocmiss, freehit, freemiss);
63993 @@ -4530,15 +4530,66 @@ static const struct file_operations proc
63994
63995 static int __init slab_proc_init(void)
63996 {
63997 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63998 + mode_t gr_mode = S_IRUGO;
63999 +
64000 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64001 + gr_mode = S_IRUSR;
64002 +#endif
64003 +
64004 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
64005 #ifdef CONFIG_DEBUG_SLAB_LEAK
64006 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
64007 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
64008 #endif
64009 return 0;
64010 }
64011 module_init(slab_proc_init);
64012 #endif
64013
64014 +void check_object_size(const void *ptr, unsigned long n, bool to)
64015 +{
64016 +
64017 +#ifdef CONFIG_PAX_USERCOPY
64018 + struct page *page;
64019 + struct kmem_cache *cachep = NULL;
64020 + struct slab *slabp;
64021 + unsigned int objnr;
64022 + unsigned long offset;
64023 +
64024 + if (!n)
64025 + return;
64026 +
64027 + if (ZERO_OR_NULL_PTR(ptr))
64028 + goto report;
64029 +
64030 + if (!virt_addr_valid(ptr))
64031 + return;
64032 +
64033 + page = virt_to_head_page(ptr);
64034 +
64035 + if (!PageSlab(page)) {
64036 + if (object_is_on_stack(ptr, n) == -1)
64037 + goto report;
64038 + return;
64039 + }
64040 +
64041 + cachep = page_get_cache(page);
64042 + if (!(cachep->flags & SLAB_USERCOPY))
64043 + goto report;
64044 +
64045 + slabp = page_get_slab(page);
64046 + objnr = obj_to_index(cachep, slabp, ptr);
64047 + BUG_ON(objnr >= cachep->num);
64048 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
64049 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
64050 + return;
64051 +
64052 +report:
64053 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
64054 +#endif
64055 +
64056 +}
64057 +EXPORT_SYMBOL(check_object_size);
64058 +
64059 /**
64060 * ksize - get the actual amount of memory allocated for a given object
64061 * @objp: Pointer to the object
64062 diff -urNp linux-2.6.39.4/mm/slob.c linux-2.6.39.4/mm/slob.c
64063 --- linux-2.6.39.4/mm/slob.c 2011-05-19 00:06:34.000000000 -0400
64064 +++ linux-2.6.39.4/mm/slob.c 2011-08-05 19:44:37.000000000 -0400
64065 @@ -29,7 +29,7 @@
64066 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
64067 * alloc_pages() directly, allocating compound pages so the page order
64068 * does not have to be separately tracked, and also stores the exact
64069 - * allocation size in page->private so that it can be used to accurately
64070 + * allocation size in slob_page->size so that it can be used to accurately
64071 * provide ksize(). These objects are detected in kfree() because slob_page()
64072 * is false for them.
64073 *
64074 @@ -58,6 +58,7 @@
64075 */
64076
64077 #include <linux/kernel.h>
64078 +#include <linux/sched.h>
64079 #include <linux/slab.h>
64080 #include <linux/mm.h>
64081 #include <linux/swap.h> /* struct reclaim_state */
64082 @@ -102,7 +103,8 @@ struct slob_page {
64083 unsigned long flags; /* mandatory */
64084 atomic_t _count; /* mandatory */
64085 slobidx_t units; /* free units left in page */
64086 - unsigned long pad[2];
64087 + unsigned long pad[1];
64088 + unsigned long size; /* size when >=PAGE_SIZE */
64089 slob_t *free; /* first free slob_t in page */
64090 struct list_head list; /* linked list of free pages */
64091 };
64092 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
64093 */
64094 static inline int is_slob_page(struct slob_page *sp)
64095 {
64096 - return PageSlab((struct page *)sp);
64097 + return PageSlab((struct page *)sp) && !sp->size;
64098 }
64099
64100 static inline void set_slob_page(struct slob_page *sp)
64101 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
64102
64103 static inline struct slob_page *slob_page(const void *addr)
64104 {
64105 - return (struct slob_page *)virt_to_page(addr);
64106 + return (struct slob_page *)virt_to_head_page(addr);
64107 }
64108
64109 /*
64110 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
64111 /*
64112 * Return the size of a slob block.
64113 */
64114 -static slobidx_t slob_units(slob_t *s)
64115 +static slobidx_t slob_units(const slob_t *s)
64116 {
64117 if (s->units > 0)
64118 return s->units;
64119 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
64120 /*
64121 * Return the next free slob block pointer after this one.
64122 */
64123 -static slob_t *slob_next(slob_t *s)
64124 +static slob_t *slob_next(const slob_t *s)
64125 {
64126 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
64127 slobidx_t next;
64128 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
64129 /*
64130 * Returns true if s is the last free block in its page.
64131 */
64132 -static int slob_last(slob_t *s)
64133 +static int slob_last(const slob_t *s)
64134 {
64135 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
64136 }
64137 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
64138 if (!page)
64139 return NULL;
64140
64141 + set_slob_page(page);
64142 return page_address(page);
64143 }
64144
64145 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
64146 if (!b)
64147 return NULL;
64148 sp = slob_page(b);
64149 - set_slob_page(sp);
64150
64151 spin_lock_irqsave(&slob_lock, flags);
64152 sp->units = SLOB_UNITS(PAGE_SIZE);
64153 sp->free = b;
64154 + sp->size = 0;
64155 INIT_LIST_HEAD(&sp->list);
64156 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
64157 set_slob_page_free(sp, slob_list);
64158 @@ -476,10 +479,9 @@ out:
64159 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
64160 */
64161
64162 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64163 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
64164 {
64165 - unsigned int *m;
64166 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64167 + slob_t *m;
64168 void *ret;
64169
64170 lockdep_trace_alloc(gfp);
64171 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
64172
64173 if (!m)
64174 return NULL;
64175 - *m = size;
64176 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
64177 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
64178 + m[0].units = size;
64179 + m[1].units = align;
64180 ret = (void *)m + align;
64181
64182 trace_kmalloc_node(_RET_IP_, ret,
64183 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
64184 gfp |= __GFP_COMP;
64185 ret = slob_new_pages(gfp, order, node);
64186 if (ret) {
64187 - struct page *page;
64188 - page = virt_to_page(ret);
64189 - page->private = size;
64190 + struct slob_page *sp;
64191 + sp = slob_page(ret);
64192 + sp->size = size;
64193 }
64194
64195 trace_kmalloc_node(_RET_IP_, ret,
64196 size, PAGE_SIZE << order, gfp, node);
64197 }
64198
64199 - kmemleak_alloc(ret, size, 1, gfp);
64200 + return ret;
64201 +}
64202 +
64203 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64204 +{
64205 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64206 + void *ret = __kmalloc_node_align(size, gfp, node, align);
64207 +
64208 + if (!ZERO_OR_NULL_PTR(ret))
64209 + kmemleak_alloc(ret, size, 1, gfp);
64210 return ret;
64211 }
64212 EXPORT_SYMBOL(__kmalloc_node);
64213 @@ -531,13 +545,88 @@ void kfree(const void *block)
64214 sp = slob_page(block);
64215 if (is_slob_page(sp)) {
64216 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64217 - unsigned int *m = (unsigned int *)(block - align);
64218 - slob_free(m, *m + align);
64219 - } else
64220 + slob_t *m = (slob_t *)(block - align);
64221 + slob_free(m, m[0].units + align);
64222 + } else {
64223 + clear_slob_page(sp);
64224 + free_slob_page(sp);
64225 + sp->size = 0;
64226 put_page(&sp->page);
64227 + }
64228 }
64229 EXPORT_SYMBOL(kfree);
64230
64231 +void check_object_size(const void *ptr, unsigned long n, bool to)
64232 +{
64233 +
64234 +#ifdef CONFIG_PAX_USERCOPY
64235 + struct slob_page *sp;
64236 + const slob_t *free;
64237 + const void *base;
64238 + unsigned long flags;
64239 +
64240 + if (!n)
64241 + return;
64242 +
64243 + if (ZERO_OR_NULL_PTR(ptr))
64244 + goto report;
64245 +
64246 + if (!virt_addr_valid(ptr))
64247 + return;
64248 +
64249 + sp = slob_page(ptr);
64250 + if (!PageSlab((struct page*)sp)) {
64251 + if (object_is_on_stack(ptr, n) == -1)
64252 + goto report;
64253 + return;
64254 + }
64255 +
64256 + if (sp->size) {
64257 + base = page_address(&sp->page);
64258 + if (base <= ptr && n <= sp->size - (ptr - base))
64259 + return;
64260 + goto report;
64261 + }
64262 +
64263 + /* some tricky double walking to find the chunk */
64264 + spin_lock_irqsave(&slob_lock, flags);
64265 + base = (void *)((unsigned long)ptr & PAGE_MASK);
64266 + free = sp->free;
64267 +
64268 + while (!slob_last(free) && (void *)free <= ptr) {
64269 + base = free + slob_units(free);
64270 + free = slob_next(free);
64271 + }
64272 +
64273 + while (base < (void *)free) {
64274 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
64275 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
64276 + int offset;
64277 +
64278 + if (ptr < base + align)
64279 + break;
64280 +
64281 + offset = ptr - base - align;
64282 + if (offset >= m) {
64283 + base += size;
64284 + continue;
64285 + }
64286 +
64287 + if (n > m - offset)
64288 + break;
64289 +
64290 + spin_unlock_irqrestore(&slob_lock, flags);
64291 + return;
64292 + }
64293 +
64294 + spin_unlock_irqrestore(&slob_lock, flags);
64295 +report:
64296 + pax_report_usercopy(ptr, n, to, NULL);
64297 +#endif
64298 +
64299 +}
64300 +EXPORT_SYMBOL(check_object_size);
64301 +
64302 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
64303 size_t ksize(const void *block)
64304 {
64305 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
64306 sp = slob_page(block);
64307 if (is_slob_page(sp)) {
64308 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64309 - unsigned int *m = (unsigned int *)(block - align);
64310 - return SLOB_UNITS(*m) * SLOB_UNIT;
64311 + slob_t *m = (slob_t *)(block - align);
64312 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
64313 } else
64314 - return sp->page.private;
64315 + return sp->size;
64316 }
64317 EXPORT_SYMBOL(ksize);
64318
64319 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
64320 {
64321 struct kmem_cache *c;
64322
64323 +#ifdef CONFIG_PAX_USERCOPY
64324 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
64325 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
64326 +#else
64327 c = slob_alloc(sizeof(struct kmem_cache),
64328 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
64329 +#endif
64330
64331 if (c) {
64332 c->name = name;
64333 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
64334 {
64335 void *b;
64336
64337 +#ifdef CONFIG_PAX_USERCOPY
64338 + b = __kmalloc_node_align(c->size, flags, node, c->align);
64339 +#else
64340 if (c->size < PAGE_SIZE) {
64341 b = slob_alloc(c->size, flags, c->align, node);
64342 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64343 SLOB_UNITS(c->size) * SLOB_UNIT,
64344 flags, node);
64345 } else {
64346 + struct slob_page *sp;
64347 +
64348 b = slob_new_pages(flags, get_order(c->size), node);
64349 + sp = slob_page(b);
64350 + sp->size = c->size;
64351 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64352 PAGE_SIZE << get_order(c->size),
64353 flags, node);
64354 }
64355 +#endif
64356
64357 if (c->ctor)
64358 c->ctor(b);
64359 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64360
64361 static void __kmem_cache_free(void *b, int size)
64362 {
64363 - if (size < PAGE_SIZE)
64364 + struct slob_page *sp = slob_page(b);
64365 +
64366 + if (is_slob_page(sp))
64367 slob_free(b, size);
64368 - else
64369 + else {
64370 + clear_slob_page(sp);
64371 + free_slob_page(sp);
64372 + sp->size = 0;
64373 slob_free_pages(b, get_order(size));
64374 + }
64375 }
64376
64377 static void kmem_rcu_free(struct rcu_head *head)
64378 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64379
64380 void kmem_cache_free(struct kmem_cache *c, void *b)
64381 {
64382 + int size = c->size;
64383 +
64384 +#ifdef CONFIG_PAX_USERCOPY
64385 + if (size + c->align < PAGE_SIZE) {
64386 + size += c->align;
64387 + b -= c->align;
64388 + }
64389 +#endif
64390 +
64391 kmemleak_free_recursive(b, c->flags);
64392 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64393 struct slob_rcu *slob_rcu;
64394 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64395 - slob_rcu->size = c->size;
64396 + slob_rcu = b + (size - sizeof(struct slob_rcu));
64397 + slob_rcu->size = size;
64398 call_rcu(&slob_rcu->head, kmem_rcu_free);
64399 } else {
64400 - __kmem_cache_free(b, c->size);
64401 + __kmem_cache_free(b, size);
64402 }
64403
64404 +#ifdef CONFIG_PAX_USERCOPY
64405 + trace_kfree(_RET_IP_, b);
64406 +#else
64407 trace_kmem_cache_free(_RET_IP_, b);
64408 +#endif
64409 +
64410 }
64411 EXPORT_SYMBOL(kmem_cache_free);
64412
64413 diff -urNp linux-2.6.39.4/mm/slub.c linux-2.6.39.4/mm/slub.c
64414 --- linux-2.6.39.4/mm/slub.c 2011-06-03 00:04:14.000000000 -0400
64415 +++ linux-2.6.39.4/mm/slub.c 2011-08-05 19:44:37.000000000 -0400
64416 @@ -431,7 +431,7 @@ static void print_track(const char *s, s
64417 if (!t->addr)
64418 return;
64419
64420 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64421 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64422 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64423 }
64424
64425 @@ -2183,6 +2183,8 @@ void kmem_cache_free(struct kmem_cache *
64426
64427 page = virt_to_head_page(x);
64428
64429 + BUG_ON(!PageSlab(page));
64430 +
64431 slab_free(s, page, x, _RET_IP_);
64432
64433 trace_kmem_cache_free(_RET_IP_, x);
64434 @@ -2216,7 +2218,7 @@ static int slub_min_objects;
64435 * Merge control. If this is set then no merging of slab caches will occur.
64436 * (Could be removed. This was introduced to pacify the merge skeptics.)
64437 */
64438 -static int slub_nomerge;
64439 +static int slub_nomerge = 1;
64440
64441 /*
64442 * Calculate the order of allocation given an slab object size.
64443 @@ -2644,7 +2646,7 @@ static int kmem_cache_open(struct kmem_c
64444 * list to avoid pounding the page allocator excessively.
64445 */
64446 set_min_partial(s, ilog2(s->size));
64447 - s->refcount = 1;
64448 + atomic_set(&s->refcount, 1);
64449 #ifdef CONFIG_NUMA
64450 s->remote_node_defrag_ratio = 1000;
64451 #endif
64452 @@ -2750,8 +2752,7 @@ static inline int kmem_cache_close(struc
64453 void kmem_cache_destroy(struct kmem_cache *s)
64454 {
64455 down_write(&slub_lock);
64456 - s->refcount--;
64457 - if (!s->refcount) {
64458 + if (atomic_dec_and_test(&s->refcount)) {
64459 list_del(&s->list);
64460 if (kmem_cache_close(s)) {
64461 printk(KERN_ERR "SLUB %s: %s called for cache that "
64462 @@ -2961,6 +2962,46 @@ void *__kmalloc_node(size_t size, gfp_t
64463 EXPORT_SYMBOL(__kmalloc_node);
64464 #endif
64465
64466 +void check_object_size(const void *ptr, unsigned long n, bool to)
64467 +{
64468 +
64469 +#ifdef CONFIG_PAX_USERCOPY
64470 + struct page *page;
64471 + struct kmem_cache *s = NULL;
64472 + unsigned long offset;
64473 +
64474 + if (!n)
64475 + return;
64476 +
64477 + if (ZERO_OR_NULL_PTR(ptr))
64478 + goto report;
64479 +
64480 + if (!virt_addr_valid(ptr))
64481 + return;
64482 +
64483 + page = virt_to_head_page(ptr);
64484 +
64485 + if (!PageSlab(page)) {
64486 + if (object_is_on_stack(ptr, n) == -1)
64487 + goto report;
64488 + return;
64489 + }
64490 +
64491 + s = page->slab;
64492 + if (!(s->flags & SLAB_USERCOPY))
64493 + goto report;
64494 +
64495 + offset = (ptr - page_address(page)) % s->size;
64496 + if (offset <= s->objsize && n <= s->objsize - offset)
64497 + return;
64498 +
64499 +report:
64500 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64501 +#endif
64502 +
64503 +}
64504 +EXPORT_SYMBOL(check_object_size);
64505 +
64506 size_t ksize(const void *object)
64507 {
64508 struct page *page;
64509 @@ -3205,7 +3246,7 @@ static void __init kmem_cache_bootstrap_
64510 int node;
64511
64512 list_add(&s->list, &slab_caches);
64513 - s->refcount = -1;
64514 + atomic_set(&s->refcount, -1);
64515
64516 for_each_node_state(node, N_NORMAL_MEMORY) {
64517 struct kmem_cache_node *n = get_node(s, node);
64518 @@ -3322,17 +3363,17 @@ void __init kmem_cache_init(void)
64519
64520 /* Caches that are not of the two-to-the-power-of size */
64521 if (KMALLOC_MIN_SIZE <= 32) {
64522 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64523 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64524 caches++;
64525 }
64526
64527 if (KMALLOC_MIN_SIZE <= 64) {
64528 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64529 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64530 caches++;
64531 }
64532
64533 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64534 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64535 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64536 caches++;
64537 }
64538
64539 @@ -3400,7 +3441,7 @@ static int slab_unmergeable(struct kmem_
64540 /*
64541 * We may have set a slab to be unmergeable during bootstrap.
64542 */
64543 - if (s->refcount < 0)
64544 + if (atomic_read(&s->refcount) < 0)
64545 return 1;
64546
64547 return 0;
64548 @@ -3459,7 +3500,7 @@ struct kmem_cache *kmem_cache_create(con
64549 down_write(&slub_lock);
64550 s = find_mergeable(size, align, flags, name, ctor);
64551 if (s) {
64552 - s->refcount++;
64553 + atomic_inc(&s->refcount);
64554 /*
64555 * Adjust the object sizes so that we clear
64556 * the complete object on kzalloc.
64557 @@ -3468,7 +3509,7 @@ struct kmem_cache *kmem_cache_create(con
64558 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64559
64560 if (sysfs_slab_alias(s, name)) {
64561 - s->refcount--;
64562 + atomic_dec(&s->refcount);
64563 goto err;
64564 }
64565 up_write(&slub_lock);
64566 @@ -4201,7 +4242,7 @@ SLAB_ATTR_RO(ctor);
64567
64568 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64569 {
64570 - return sprintf(buf, "%d\n", s->refcount - 1);
64571 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64572 }
64573 SLAB_ATTR_RO(aliases);
64574
64575 @@ -4945,7 +4986,13 @@ static const struct file_operations proc
64576
64577 static int __init slab_proc_init(void)
64578 {
64579 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64580 + mode_t gr_mode = S_IRUGO;
64581 +
64582 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64583 + gr_mode = S_IRUSR;
64584 +#endif
64585 +
64586 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64587 return 0;
64588 }
64589 module_init(slab_proc_init);
64590 diff -urNp linux-2.6.39.4/mm/swap.c linux-2.6.39.4/mm/swap.c
64591 --- linux-2.6.39.4/mm/swap.c 2011-05-19 00:06:34.000000000 -0400
64592 +++ linux-2.6.39.4/mm/swap.c 2011-08-05 19:44:37.000000000 -0400
64593 @@ -31,6 +31,7 @@
64594 #include <linux/backing-dev.h>
64595 #include <linux/memcontrol.h>
64596 #include <linux/gfp.h>
64597 +#include <linux/hugetlb.h>
64598
64599 #include "internal.h"
64600
64601 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64602
64603 __page_cache_release(page);
64604 dtor = get_compound_page_dtor(page);
64605 + if (!PageHuge(page))
64606 + BUG_ON(dtor != free_compound_page);
64607 (*dtor)(page);
64608 }
64609
64610 diff -urNp linux-2.6.39.4/mm/swapfile.c linux-2.6.39.4/mm/swapfile.c
64611 --- linux-2.6.39.4/mm/swapfile.c 2011-05-19 00:06:34.000000000 -0400
64612 +++ linux-2.6.39.4/mm/swapfile.c 2011-08-05 19:44:37.000000000 -0400
64613 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
64614
64615 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64616 /* Activity counter to indicate that a swapon or swapoff has occurred */
64617 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
64618 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64619
64620 static inline unsigned char swap_count(unsigned char ent)
64621 {
64622 @@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64623 }
64624 filp_close(swap_file, NULL);
64625 err = 0;
64626 - atomic_inc(&proc_poll_event);
64627 + atomic_inc_unchecked(&proc_poll_event);
64628 wake_up_interruptible(&proc_poll_wait);
64629
64630 out_dput:
64631 @@ -1690,8 +1690,8 @@ static unsigned swaps_poll(struct file *
64632
64633 poll_wait(file, &proc_poll_wait, wait);
64634
64635 - if (s->event != atomic_read(&proc_poll_event)) {
64636 - s->event = atomic_read(&proc_poll_event);
64637 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64638 + s->event = atomic_read_unchecked(&proc_poll_event);
64639 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64640 }
64641
64642 @@ -1797,7 +1797,7 @@ static int swaps_open(struct inode *inod
64643 }
64644
64645 s->seq.private = s;
64646 - s->event = atomic_read(&proc_poll_event);
64647 + s->event = atomic_read_unchecked(&proc_poll_event);
64648 return ret;
64649 }
64650
64651 @@ -2131,7 +2131,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64652 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64653
64654 mutex_unlock(&swapon_mutex);
64655 - atomic_inc(&proc_poll_event);
64656 + atomic_inc_unchecked(&proc_poll_event);
64657 wake_up_interruptible(&proc_poll_wait);
64658
64659 if (S_ISREG(inode->i_mode))
64660 diff -urNp linux-2.6.39.4/mm/util.c linux-2.6.39.4/mm/util.c
64661 --- linux-2.6.39.4/mm/util.c 2011-05-19 00:06:34.000000000 -0400
64662 +++ linux-2.6.39.4/mm/util.c 2011-08-05 19:44:37.000000000 -0400
64663 @@ -112,6 +112,7 @@ EXPORT_SYMBOL(memdup_user);
64664 * allocated buffer. Use this if you don't want to free the buffer immediately
64665 * like, for example, with RCU.
64666 */
64667 +#undef __krealloc
64668 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64669 {
64670 void *ret;
64671 @@ -145,6 +146,7 @@ EXPORT_SYMBOL(__krealloc);
64672 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64673 * %NULL pointer, the object pointed to is freed.
64674 */
64675 +#undef krealloc
64676 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64677 {
64678 void *ret;
64679 @@ -219,6 +221,12 @@ EXPORT_SYMBOL(strndup_user);
64680 void arch_pick_mmap_layout(struct mm_struct *mm)
64681 {
64682 mm->mmap_base = TASK_UNMAPPED_BASE;
64683 +
64684 +#ifdef CONFIG_PAX_RANDMMAP
64685 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64686 + mm->mmap_base += mm->delta_mmap;
64687 +#endif
64688 +
64689 mm->get_unmapped_area = arch_get_unmapped_area;
64690 mm->unmap_area = arch_unmap_area;
64691 }
64692 diff -urNp linux-2.6.39.4/mm/vmalloc.c linux-2.6.39.4/mm/vmalloc.c
64693 --- linux-2.6.39.4/mm/vmalloc.c 2011-05-19 00:06:34.000000000 -0400
64694 +++ linux-2.6.39.4/mm/vmalloc.c 2011-08-05 19:44:37.000000000 -0400
64695 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64696
64697 pte = pte_offset_kernel(pmd, addr);
64698 do {
64699 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64700 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64701 +
64702 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64703 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64704 + BUG_ON(!pte_exec(*pte));
64705 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64706 + continue;
64707 + }
64708 +#endif
64709 +
64710 + {
64711 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64712 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64713 + }
64714 } while (pte++, addr += PAGE_SIZE, addr != end);
64715 }
64716
64717 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64718 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64719 {
64720 pte_t *pte;
64721 + int ret = -ENOMEM;
64722
64723 /*
64724 * nr is a running index into the array which helps higher level
64725 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64726 pte = pte_alloc_kernel(pmd, addr);
64727 if (!pte)
64728 return -ENOMEM;
64729 +
64730 + pax_open_kernel();
64731 do {
64732 struct page *page = pages[*nr];
64733
64734 - if (WARN_ON(!pte_none(*pte)))
64735 - return -EBUSY;
64736 - if (WARN_ON(!page))
64737 - return -ENOMEM;
64738 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64739 + if (pgprot_val(prot) & _PAGE_NX)
64740 +#endif
64741 +
64742 + if (WARN_ON(!pte_none(*pte))) {
64743 + ret = -EBUSY;
64744 + goto out;
64745 + }
64746 + if (WARN_ON(!page)) {
64747 + ret = -ENOMEM;
64748 + goto out;
64749 + }
64750 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64751 (*nr)++;
64752 } while (pte++, addr += PAGE_SIZE, addr != end);
64753 - return 0;
64754 + ret = 0;
64755 +out:
64756 + pax_close_kernel();
64757 + return ret;
64758 }
64759
64760 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64761 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64762 * and fall back on vmalloc() if that fails. Others
64763 * just put it in the vmalloc space.
64764 */
64765 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64766 +#ifdef CONFIG_MODULES
64767 +#ifdef MODULES_VADDR
64768 unsigned long addr = (unsigned long)x;
64769 if (addr >= MODULES_VADDR && addr < MODULES_END)
64770 return 1;
64771 #endif
64772 +
64773 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64774 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64775 + return 1;
64776 +#endif
64777 +
64778 +#endif
64779 +
64780 return is_vmalloc_addr(x);
64781 }
64782
64783 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64784
64785 if (!pgd_none(*pgd)) {
64786 pud_t *pud = pud_offset(pgd, addr);
64787 +#ifdef CONFIG_X86
64788 + if (!pud_large(*pud))
64789 +#endif
64790 if (!pud_none(*pud)) {
64791 pmd_t *pmd = pmd_offset(pud, addr);
64792 +#ifdef CONFIG_X86
64793 + if (!pmd_large(*pmd))
64794 +#endif
64795 if (!pmd_none(*pmd)) {
64796 pte_t *ptep, pte;
64797
64798 @@ -1296,6 +1336,16 @@ static struct vm_struct *__get_vm_area_n
64799 struct vm_struct *area;
64800
64801 BUG_ON(in_interrupt());
64802 +
64803 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64804 + if (flags & VM_KERNEXEC) {
64805 + if (start != VMALLOC_START || end != VMALLOC_END)
64806 + return NULL;
64807 + start = (unsigned long)MODULES_EXEC_VADDR;
64808 + end = (unsigned long)MODULES_EXEC_END;
64809 + }
64810 +#endif
64811 +
64812 if (flags & VM_IOREMAP) {
64813 int bit = fls(size);
64814
64815 @@ -1514,6 +1564,11 @@ void *vmap(struct page **pages, unsigned
64816 if (count > totalram_pages)
64817 return NULL;
64818
64819 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64820 + if (!(pgprot_val(prot) & _PAGE_NX))
64821 + flags |= VM_KERNEXEC;
64822 +#endif
64823 +
64824 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64825 __builtin_return_address(0));
64826 if (!area)
64827 @@ -1610,6 +1665,13 @@ void *__vmalloc_node_range(unsigned long
64828 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64829 return NULL;
64830
64831 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64832 + if (!(pgprot_val(prot) & _PAGE_NX))
64833 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64834 + node, gfp_mask, caller);
64835 + else
64836 +#endif
64837 +
64838 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64839 gfp_mask, caller);
64840
64841 @@ -1649,6 +1711,7 @@ static void *__vmalloc_node(unsigned lon
64842 gfp_mask, prot, node, caller);
64843 }
64844
64845 +#undef __vmalloc
64846 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64847 {
64848 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64849 @@ -1672,6 +1735,7 @@ static inline void *__vmalloc_node_flags
64850 * For tight control over page level allocator and protection flags
64851 * use __vmalloc() instead.
64852 */
64853 +#undef vmalloc
64854 void *vmalloc(unsigned long size)
64855 {
64856 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64857 @@ -1688,6 +1752,7 @@ EXPORT_SYMBOL(vmalloc);
64858 * For tight control over page level allocator and protection flags
64859 * use __vmalloc() instead.
64860 */
64861 +#undef vzalloc
64862 void *vzalloc(unsigned long size)
64863 {
64864 return __vmalloc_node_flags(size, -1,
64865 @@ -1702,6 +1767,7 @@ EXPORT_SYMBOL(vzalloc);
64866 * The resulting memory area is zeroed so it can be mapped to userspace
64867 * without leaking data.
64868 */
64869 +#undef vmalloc_user
64870 void *vmalloc_user(unsigned long size)
64871 {
64872 struct vm_struct *area;
64873 @@ -1729,6 +1795,7 @@ EXPORT_SYMBOL(vmalloc_user);
64874 * For tight control over page level allocator and protection flags
64875 * use __vmalloc() instead.
64876 */
64877 +#undef vmalloc_node
64878 void *vmalloc_node(unsigned long size, int node)
64879 {
64880 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64881 @@ -1748,6 +1815,7 @@ EXPORT_SYMBOL(vmalloc_node);
64882 * For tight control over page level allocator and protection flags
64883 * use __vmalloc_node() instead.
64884 */
64885 +#undef vzalloc_node
64886 void *vzalloc_node(unsigned long size, int node)
64887 {
64888 return __vmalloc_node_flags(size, node,
64889 @@ -1770,10 +1838,10 @@ EXPORT_SYMBOL(vzalloc_node);
64890 * For tight control over page level allocator and protection flags
64891 * use __vmalloc() instead.
64892 */
64893 -
64894 +#undef vmalloc_exec
64895 void *vmalloc_exec(unsigned long size)
64896 {
64897 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64898 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64899 -1, __builtin_return_address(0));
64900 }
64901
64902 @@ -1792,6 +1860,7 @@ void *vmalloc_exec(unsigned long size)
64903 * Allocate enough 32bit PA addressable pages to cover @size from the
64904 * page level allocator and map them into contiguous kernel virtual space.
64905 */
64906 +#undef vmalloc_32
64907 void *vmalloc_32(unsigned long size)
64908 {
64909 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64910 @@ -1806,6 +1875,7 @@ EXPORT_SYMBOL(vmalloc_32);
64911 * The resulting memory area is 32bit addressable and zeroed so it can be
64912 * mapped to userspace without leaking data.
64913 */
64914 +#undef vmalloc_32_user
64915 void *vmalloc_32_user(unsigned long size)
64916 {
64917 struct vm_struct *area;
64918 @@ -2068,6 +2138,8 @@ int remap_vmalloc_range(struct vm_area_s
64919 unsigned long uaddr = vma->vm_start;
64920 unsigned long usize = vma->vm_end - vma->vm_start;
64921
64922 + BUG_ON(vma->vm_mirror);
64923 +
64924 if ((PAGE_SIZE-1) & (unsigned long)addr)
64925 return -EINVAL;
64926
64927 diff -urNp linux-2.6.39.4/mm/vmstat.c linux-2.6.39.4/mm/vmstat.c
64928 --- linux-2.6.39.4/mm/vmstat.c 2011-05-19 00:06:34.000000000 -0400
64929 +++ linux-2.6.39.4/mm/vmstat.c 2011-08-05 19:44:37.000000000 -0400
64930 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64931 *
64932 * vm_stat contains the global counters
64933 */
64934 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64935 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64936 EXPORT_SYMBOL(vm_stat);
64937
64938 #ifdef CONFIG_SMP
64939 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64940 v = p->vm_stat_diff[i];
64941 p->vm_stat_diff[i] = 0;
64942 local_irq_restore(flags);
64943 - atomic_long_add(v, &zone->vm_stat[i]);
64944 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64945 global_diff[i] += v;
64946 #ifdef CONFIG_NUMA
64947 /* 3 seconds idle till flush */
64948 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64949
64950 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64951 if (global_diff[i])
64952 - atomic_long_add(global_diff[i], &vm_stat[i]);
64953 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64954 }
64955
64956 #endif
64957 @@ -1205,10 +1205,20 @@ static int __init setup_vmstat(void)
64958 start_cpu_timer(cpu);
64959 #endif
64960 #ifdef CONFIG_PROC_FS
64961 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64962 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64963 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64964 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64965 + {
64966 + mode_t gr_mode = S_IRUGO;
64967 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64968 + gr_mode = S_IRUSR;
64969 +#endif
64970 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64971 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64972 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64973 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64974 +#else
64975 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64976 +#endif
64977 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64978 + }
64979 #endif
64980 return 0;
64981 }
64982 diff -urNp linux-2.6.39.4/net/8021q/vlan.c linux-2.6.39.4/net/8021q/vlan.c
64983 --- linux-2.6.39.4/net/8021q/vlan.c 2011-05-19 00:06:34.000000000 -0400
64984 +++ linux-2.6.39.4/net/8021q/vlan.c 2011-08-05 19:44:37.000000000 -0400
64985 @@ -592,8 +592,7 @@ static int vlan_ioctl_handler(struct net
64986 err = -EPERM;
64987 if (!capable(CAP_NET_ADMIN))
64988 break;
64989 - if ((args.u.name_type >= 0) &&
64990 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64991 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64992 struct vlan_net *vn;
64993
64994 vn = net_generic(net, vlan_net_id);
64995 diff -urNp linux-2.6.39.4/net/atm/atm_misc.c linux-2.6.39.4/net/atm/atm_misc.c
64996 --- linux-2.6.39.4/net/atm/atm_misc.c 2011-05-19 00:06:34.000000000 -0400
64997 +++ linux-2.6.39.4/net/atm/atm_misc.c 2011-08-05 19:44:37.000000000 -0400
64998 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64999 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
65000 return 1;
65001 atm_return(vcc, truesize);
65002 - atomic_inc(&vcc->stats->rx_drop);
65003 + atomic_inc_unchecked(&vcc->stats->rx_drop);
65004 return 0;
65005 }
65006 EXPORT_SYMBOL(atm_charge);
65007 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
65008 }
65009 }
65010 atm_return(vcc, guess);
65011 - atomic_inc(&vcc->stats->rx_drop);
65012 + atomic_inc_unchecked(&vcc->stats->rx_drop);
65013 return NULL;
65014 }
65015 EXPORT_SYMBOL(atm_alloc_charge);
65016 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
65017
65018 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
65019 {
65020 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65021 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65022 __SONET_ITEMS
65023 #undef __HANDLE_ITEM
65024 }
65025 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
65026
65027 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
65028 {
65029 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65030 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
65031 __SONET_ITEMS
65032 #undef __HANDLE_ITEM
65033 }
65034 diff -urNp linux-2.6.39.4/net/atm/lec.h linux-2.6.39.4/net/atm/lec.h
65035 --- linux-2.6.39.4/net/atm/lec.h 2011-05-19 00:06:34.000000000 -0400
65036 +++ linux-2.6.39.4/net/atm/lec.h 2011-08-05 20:34:06.000000000 -0400
65037 @@ -48,7 +48,7 @@ struct lane2_ops {
65038 const u8 *tlvs, u32 sizeoftlvs);
65039 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
65040 const u8 *tlvs, u32 sizeoftlvs);
65041 -};
65042 +} __no_const;
65043
65044 /*
65045 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
65046 diff -urNp linux-2.6.39.4/net/atm/mpc.h linux-2.6.39.4/net/atm/mpc.h
65047 --- linux-2.6.39.4/net/atm/mpc.h 2011-05-19 00:06:34.000000000 -0400
65048 +++ linux-2.6.39.4/net/atm/mpc.h 2011-08-05 20:34:06.000000000 -0400
65049 @@ -33,7 +33,7 @@ struct mpoa_client {
65050 struct mpc_parameters parameters; /* parameters for this client */
65051
65052 const struct net_device_ops *old_ops;
65053 - struct net_device_ops new_ops;
65054 + net_device_ops_no_const new_ops;
65055 };
65056
65057
65058 diff -urNp linux-2.6.39.4/net/atm/mpoa_caches.c linux-2.6.39.4/net/atm/mpoa_caches.c
65059 --- linux-2.6.39.4/net/atm/mpoa_caches.c 2011-05-19 00:06:34.000000000 -0400
65060 +++ linux-2.6.39.4/net/atm/mpoa_caches.c 2011-08-05 19:44:37.000000000 -0400
65061 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
65062 struct timeval now;
65063 struct k_message msg;
65064
65065 + pax_track_stack();
65066 +
65067 do_gettimeofday(&now);
65068
65069 read_lock_bh(&client->ingress_lock);
65070 diff -urNp linux-2.6.39.4/net/atm/proc.c linux-2.6.39.4/net/atm/proc.c
65071 --- linux-2.6.39.4/net/atm/proc.c 2011-05-19 00:06:34.000000000 -0400
65072 +++ linux-2.6.39.4/net/atm/proc.c 2011-08-05 19:44:37.000000000 -0400
65073 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
65074 const struct k_atm_aal_stats *stats)
65075 {
65076 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
65077 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
65078 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
65079 - atomic_read(&stats->rx_drop));
65080 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
65081 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
65082 + atomic_read_unchecked(&stats->rx_drop));
65083 }
65084
65085 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
65086 @@ -191,7 +191,12 @@ static void vcc_info(struct seq_file *se
65087 {
65088 struct sock *sk = sk_atm(vcc);
65089
65090 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65091 + seq_printf(seq, "%p ", NULL);
65092 +#else
65093 seq_printf(seq, "%p ", vcc);
65094 +#endif
65095 +
65096 if (!vcc->dev)
65097 seq_printf(seq, "Unassigned ");
65098 else
65099 @@ -218,7 +223,11 @@ static void svc_info(struct seq_file *se
65100 {
65101 if (!vcc->dev)
65102 seq_printf(seq, sizeof(void *) == 4 ?
65103 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65104 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
65105 +#else
65106 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
65107 +#endif
65108 else
65109 seq_printf(seq, "%3d %3d %5d ",
65110 vcc->dev->number, vcc->vpi, vcc->vci);
65111 diff -urNp linux-2.6.39.4/net/atm/resources.c linux-2.6.39.4/net/atm/resources.c
65112 --- linux-2.6.39.4/net/atm/resources.c 2011-05-19 00:06:34.000000000 -0400
65113 +++ linux-2.6.39.4/net/atm/resources.c 2011-08-05 19:44:37.000000000 -0400
65114 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
65115 static void copy_aal_stats(struct k_atm_aal_stats *from,
65116 struct atm_aal_stats *to)
65117 {
65118 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65119 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65120 __AAL_STAT_ITEMS
65121 #undef __HANDLE_ITEM
65122 }
65123 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
65124 static void subtract_aal_stats(struct k_atm_aal_stats *from,
65125 struct atm_aal_stats *to)
65126 {
65127 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65128 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
65129 __AAL_STAT_ITEMS
65130 #undef __HANDLE_ITEM
65131 }
65132 diff -urNp linux-2.6.39.4/net/batman-adv/hard-interface.c linux-2.6.39.4/net/batman-adv/hard-interface.c
65133 --- linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-05-19 00:06:34.000000000 -0400
65134 +++ linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-08-05 19:44:37.000000000 -0400
65135 @@ -339,8 +339,8 @@ int hardif_enable_interface(struct hard_
65136 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
65137 dev_add_pack(&hard_iface->batman_adv_ptype);
65138
65139 - atomic_set(&hard_iface->seqno, 1);
65140 - atomic_set(&hard_iface->frag_seqno, 1);
65141 + atomic_set_unchecked(&hard_iface->seqno, 1);
65142 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
65143 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
65144 hard_iface->net_dev->name);
65145
65146 diff -urNp linux-2.6.39.4/net/batman-adv/routing.c linux-2.6.39.4/net/batman-adv/routing.c
65147 --- linux-2.6.39.4/net/batman-adv/routing.c 2011-05-19 00:06:34.000000000 -0400
65148 +++ linux-2.6.39.4/net/batman-adv/routing.c 2011-08-05 19:44:37.000000000 -0400
65149 @@ -625,7 +625,7 @@ void receive_bat_packet(struct ethhdr *e
65150 return;
65151
65152 /* could be changed by schedule_own_packet() */
65153 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
65154 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
65155
65156 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
65157
65158 diff -urNp linux-2.6.39.4/net/batman-adv/send.c linux-2.6.39.4/net/batman-adv/send.c
65159 --- linux-2.6.39.4/net/batman-adv/send.c 2011-05-19 00:06:34.000000000 -0400
65160 +++ linux-2.6.39.4/net/batman-adv/send.c 2011-08-05 19:44:37.000000000 -0400
65161 @@ -277,7 +277,7 @@ void schedule_own_packet(struct hard_ifa
65162
65163 /* change sequence number to network order */
65164 batman_packet->seqno =
65165 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
65166 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
65167
65168 if (vis_server == VIS_TYPE_SERVER_SYNC)
65169 batman_packet->flags |= VIS_SERVER;
65170 @@ -291,7 +291,7 @@ void schedule_own_packet(struct hard_ifa
65171 else
65172 batman_packet->gw_flags = 0;
65173
65174 - atomic_inc(&hard_iface->seqno);
65175 + atomic_inc_unchecked(&hard_iface->seqno);
65176
65177 slide_own_bcast_window(hard_iface);
65178 send_time = own_send_time(bat_priv);
65179 diff -urNp linux-2.6.39.4/net/batman-adv/soft-interface.c linux-2.6.39.4/net/batman-adv/soft-interface.c
65180 --- linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-05-19 00:06:34.000000000 -0400
65181 +++ linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-08-05 19:44:37.000000000 -0400
65182 @@ -386,7 +386,7 @@ int interface_tx(struct sk_buff *skb, st
65183
65184 /* set broadcast sequence number */
65185 bcast_packet->seqno =
65186 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
65187 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
65188
65189 add_bcast_packet_to_list(bat_priv, skb);
65190
65191 @@ -579,7 +579,7 @@ struct net_device *softif_create(char *n
65192 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
65193
65194 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
65195 - atomic_set(&bat_priv->bcast_seqno, 1);
65196 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
65197 atomic_set(&bat_priv->hna_local_changed, 0);
65198
65199 bat_priv->primary_if = NULL;
65200 diff -urNp linux-2.6.39.4/net/batman-adv/types.h linux-2.6.39.4/net/batman-adv/types.h
65201 --- linux-2.6.39.4/net/batman-adv/types.h 2011-05-19 00:06:34.000000000 -0400
65202 +++ linux-2.6.39.4/net/batman-adv/types.h 2011-08-05 19:44:37.000000000 -0400
65203 @@ -38,8 +38,8 @@ struct hard_iface {
65204 int16_t if_num;
65205 char if_status;
65206 struct net_device *net_dev;
65207 - atomic_t seqno;
65208 - atomic_t frag_seqno;
65209 + atomic_unchecked_t seqno;
65210 + atomic_unchecked_t frag_seqno;
65211 unsigned char *packet_buff;
65212 int packet_len;
65213 struct kobject *hardif_obj;
65214 @@ -141,7 +141,7 @@ struct bat_priv {
65215 atomic_t orig_interval; /* uint */
65216 atomic_t hop_penalty; /* uint */
65217 atomic_t log_level; /* uint */
65218 - atomic_t bcast_seqno;
65219 + atomic_unchecked_t bcast_seqno;
65220 atomic_t bcast_queue_left;
65221 atomic_t batman_queue_left;
65222 char num_ifaces;
65223 diff -urNp linux-2.6.39.4/net/batman-adv/unicast.c linux-2.6.39.4/net/batman-adv/unicast.c
65224 --- linux-2.6.39.4/net/batman-adv/unicast.c 2011-05-19 00:06:34.000000000 -0400
65225 +++ linux-2.6.39.4/net/batman-adv/unicast.c 2011-08-05 19:44:37.000000000 -0400
65226 @@ -263,7 +263,7 @@ int frag_send_skb(struct sk_buff *skb, s
65227 frag1->flags = UNI_FRAG_HEAD | large_tail;
65228 frag2->flags = large_tail;
65229
65230 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
65231 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
65232 frag1->seqno = htons(seqno - 1);
65233 frag2->seqno = htons(seqno);
65234
65235 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_core.c linux-2.6.39.4/net/bluetooth/l2cap_core.c
65236 --- linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-05-19 00:06:34.000000000 -0400
65237 +++ linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-08-05 19:44:37.000000000 -0400
65238 @@ -2202,7 +2202,7 @@ static inline int l2cap_config_req(struc
65239
65240 /* Reject if config buffer is too small. */
65241 len = cmd_len - sizeof(*req);
65242 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65243 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65244 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
65245 l2cap_build_conf_rsp(sk, rsp,
65246 L2CAP_CONF_REJECT, flags), rsp);
65247 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_sock.c linux-2.6.39.4/net/bluetooth/l2cap_sock.c
65248 --- linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-05-19 00:06:34.000000000 -0400
65249 +++ linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-08-05 19:44:37.000000000 -0400
65250 @@ -446,6 +446,7 @@ static int l2cap_sock_getsockopt_old(str
65251 break;
65252 }
65253
65254 + memset(&cinfo, 0, sizeof(cinfo));
65255 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
65256 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
65257
65258 diff -urNp linux-2.6.39.4/net/bluetooth/rfcomm/sock.c linux-2.6.39.4/net/bluetooth/rfcomm/sock.c
65259 --- linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-05-19 00:06:34.000000000 -0400
65260 +++ linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-08-05 19:44:37.000000000 -0400
65261 @@ -787,6 +787,7 @@ static int rfcomm_sock_getsockopt_old(st
65262
65263 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
65264
65265 + memset(&cinfo, 0, sizeof(cinfo));
65266 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
65267 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
65268
65269 diff -urNp linux-2.6.39.4/net/bridge/br_multicast.c linux-2.6.39.4/net/bridge/br_multicast.c
65270 --- linux-2.6.39.4/net/bridge/br_multicast.c 2011-05-19 00:06:34.000000000 -0400
65271 +++ linux-2.6.39.4/net/bridge/br_multicast.c 2011-08-05 19:44:37.000000000 -0400
65272 @@ -1482,7 +1482,7 @@ static int br_multicast_ipv6_rcv(struct
65273 nexthdr = ip6h->nexthdr;
65274 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
65275
65276 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
65277 + if (nexthdr != IPPROTO_ICMPV6)
65278 return 0;
65279
65280 /* Okay, we found ICMPv6 header */
65281 diff -urNp linux-2.6.39.4/net/bridge/netfilter/ebtables.c linux-2.6.39.4/net/bridge/netfilter/ebtables.c
65282 --- linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-05-19 00:06:34.000000000 -0400
65283 +++ linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-08-05 19:44:37.000000000 -0400
65284 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
65285 tmp.valid_hooks = t->table->valid_hooks;
65286 }
65287 mutex_unlock(&ebt_mutex);
65288 - if (copy_to_user(user, &tmp, *len) != 0){
65289 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
65290 BUGPRINT("c2u Didn't work\n");
65291 ret = -EFAULT;
65292 break;
65293 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
65294 int ret;
65295 void __user *pos;
65296
65297 + pax_track_stack();
65298 +
65299 memset(&tinfo, 0, sizeof(tinfo));
65300
65301 if (cmd == EBT_SO_GET_ENTRIES) {
65302 diff -urNp linux-2.6.39.4/net/caif/caif_socket.c linux-2.6.39.4/net/caif/caif_socket.c
65303 --- linux-2.6.39.4/net/caif/caif_socket.c 2011-05-19 00:06:34.000000000 -0400
65304 +++ linux-2.6.39.4/net/caif/caif_socket.c 2011-08-05 19:44:37.000000000 -0400
65305 @@ -48,18 +48,19 @@ static struct dentry *debugfsdir;
65306 #ifdef CONFIG_DEBUG_FS
65307 struct debug_fs_counter {
65308 atomic_t caif_nr_socks;
65309 - atomic_t num_connect_req;
65310 - atomic_t num_connect_resp;
65311 - atomic_t num_connect_fail_resp;
65312 - atomic_t num_disconnect;
65313 - atomic_t num_remote_shutdown_ind;
65314 - atomic_t num_tx_flow_off_ind;
65315 - atomic_t num_tx_flow_on_ind;
65316 - atomic_t num_rx_flow_off;
65317 - atomic_t num_rx_flow_on;
65318 + atomic_unchecked_t num_connect_req;
65319 + atomic_unchecked_t num_connect_resp;
65320 + atomic_unchecked_t num_connect_fail_resp;
65321 + atomic_unchecked_t num_disconnect;
65322 + atomic_unchecked_t num_remote_shutdown_ind;
65323 + atomic_unchecked_t num_tx_flow_off_ind;
65324 + atomic_unchecked_t num_tx_flow_on_ind;
65325 + atomic_unchecked_t num_rx_flow_off;
65326 + atomic_unchecked_t num_rx_flow_on;
65327 };
65328 static struct debug_fs_counter cnt;
65329 #define dbfs_atomic_inc(v) atomic_inc(v)
65330 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_unchecked(v)
65331 #define dbfs_atomic_dec(v) atomic_dec(v)
65332 #else
65333 #define dbfs_atomic_inc(v)
65334 @@ -159,7 +160,7 @@ static int caif_queue_rcv_skb(struct soc
65335 atomic_read(&cf_sk->sk.sk_rmem_alloc),
65336 sk_rcvbuf_lowwater(cf_sk));
65337 set_rx_flow_off(cf_sk);
65338 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65339 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65340 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65341 }
65342
65343 @@ -169,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
65344 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
65345 set_rx_flow_off(cf_sk);
65346 pr_debug("sending flow OFF due to rmem_schedule\n");
65347 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65348 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65349 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65350 }
65351 skb->dev = NULL;
65352 @@ -218,21 +219,21 @@ static void caif_ctrl_cb(struct cflayer
65353 switch (flow) {
65354 case CAIF_CTRLCMD_FLOW_ON_IND:
65355 /* OK from modem to start sending again */
65356 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
65357 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
65358 set_tx_flow_on(cf_sk);
65359 cf_sk->sk.sk_state_change(&cf_sk->sk);
65360 break;
65361
65362 case CAIF_CTRLCMD_FLOW_OFF_IND:
65363 /* Modem asks us to shut up */
65364 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
65365 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
65366 set_tx_flow_off(cf_sk);
65367 cf_sk->sk.sk_state_change(&cf_sk->sk);
65368 break;
65369
65370 case CAIF_CTRLCMD_INIT_RSP:
65371 /* We're now connected */
65372 - dbfs_atomic_inc(&cnt.num_connect_resp);
65373 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
65374 cf_sk->sk.sk_state = CAIF_CONNECTED;
65375 set_tx_flow_on(cf_sk);
65376 cf_sk->sk.sk_state_change(&cf_sk->sk);
65377 @@ -247,7 +248,7 @@ static void caif_ctrl_cb(struct cflayer
65378
65379 case CAIF_CTRLCMD_INIT_FAIL_RSP:
65380 /* Connect request failed */
65381 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
65382 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
65383 cf_sk->sk.sk_err = ECONNREFUSED;
65384 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
65385 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65386 @@ -261,7 +262,7 @@ static void caif_ctrl_cb(struct cflayer
65387
65388 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
65389 /* Modem has closed this connection, or device is down. */
65390 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
65391 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
65392 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65393 cf_sk->sk.sk_err = ECONNRESET;
65394 set_rx_flow_on(cf_sk);
65395 @@ -281,7 +282,7 @@ static void caif_check_flow_release(stru
65396 return;
65397
65398 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
65399 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
65400 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
65401 set_rx_flow_on(cf_sk);
65402 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
65403 }
65404 @@ -864,7 +865,7 @@ static int caif_connect(struct socket *s
65405 /*ifindex = id of the interface.*/
65406 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
65407
65408 - dbfs_atomic_inc(&cnt.num_connect_req);
65409 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
65410 cf_sk->layer.receive = caif_sktrecv_cb;
65411 err = caif_connect_client(&cf_sk->conn_req,
65412 &cf_sk->layer, &ifindex, &headroom, &tailroom);
65413 @@ -952,7 +953,7 @@ static int caif_release(struct socket *s
65414 spin_unlock(&sk->sk_receive_queue.lock);
65415 sock->sk = NULL;
65416
65417 - dbfs_atomic_inc(&cnt.num_disconnect);
65418 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65419
65420 if (cf_sk->debugfs_socket_dir != NULL)
65421 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
65422 diff -urNp linux-2.6.39.4/net/caif/cfctrl.c linux-2.6.39.4/net/caif/cfctrl.c
65423 --- linux-2.6.39.4/net/caif/cfctrl.c 2011-05-19 00:06:34.000000000 -0400
65424 +++ linux-2.6.39.4/net/caif/cfctrl.c 2011-08-05 19:44:37.000000000 -0400
65425 @@ -9,6 +9,7 @@
65426 #include <linux/stddef.h>
65427 #include <linux/spinlock.h>
65428 #include <linux/slab.h>
65429 +#include <linux/sched.h>
65430 #include <net/caif/caif_layer.h>
65431 #include <net/caif/cfpkt.h>
65432 #include <net/caif/cfctrl.h>
65433 @@ -46,8 +47,8 @@ struct cflayer *cfctrl_create(void)
65434 dev_info.id = 0xff;
65435 memset(this, 0, sizeof(*this));
65436 cfsrvl_init(&this->serv, 0, &dev_info, false);
65437 - atomic_set(&this->req_seq_no, 1);
65438 - atomic_set(&this->rsp_seq_no, 1);
65439 + atomic_set_unchecked(&this->req_seq_no, 1);
65440 + atomic_set_unchecked(&this->rsp_seq_no, 1);
65441 this->serv.layer.receive = cfctrl_recv;
65442 sprintf(this->serv.layer.name, "ctrl");
65443 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65444 @@ -116,8 +117,8 @@ void cfctrl_insert_req(struct cfctrl *ct
65445 struct cfctrl_request_info *req)
65446 {
65447 spin_lock(&ctrl->info_list_lock);
65448 - atomic_inc(&ctrl->req_seq_no);
65449 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
65450 + atomic_inc_unchecked(&ctrl->req_seq_no);
65451 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65452 list_add_tail(&req->list, &ctrl->list);
65453 spin_unlock(&ctrl->info_list_lock);
65454 }
65455 @@ -136,7 +137,7 @@ struct cfctrl_request_info *cfctrl_remov
65456 if (p != first)
65457 pr_warn("Requests are not received in order\n");
65458
65459 - atomic_set(&ctrl->rsp_seq_no,
65460 + atomic_set_unchecked(&ctrl->rsp_seq_no,
65461 p->sequence_no);
65462 list_del(&p->list);
65463 goto out;
65464 @@ -385,6 +386,7 @@ static int cfctrl_recv(struct cflayer *l
65465 struct cfctrl *cfctrl = container_obj(layer);
65466 struct cfctrl_request_info rsp, *req;
65467
65468 + pax_track_stack();
65469
65470 cfpkt_extr_head(pkt, &cmdrsp, 1);
65471 cmd = cmdrsp & CFCTRL_CMD_MASK;
65472 diff -urNp linux-2.6.39.4/net/can/bcm.c linux-2.6.39.4/net/can/bcm.c
65473 --- linux-2.6.39.4/net/can/bcm.c 2011-05-19 00:06:34.000000000 -0400
65474 +++ linux-2.6.39.4/net/can/bcm.c 2011-08-05 19:44:37.000000000 -0400
65475 @@ -165,9 +165,15 @@ static int bcm_proc_show(struct seq_file
65476 struct bcm_sock *bo = bcm_sk(sk);
65477 struct bcm_op *op;
65478
65479 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65480 + seq_printf(m, ">>> socket %p", NULL);
65481 + seq_printf(m, " / sk %p", NULL);
65482 + seq_printf(m, " / bo %p", NULL);
65483 +#else
65484 seq_printf(m, ">>> socket %p", sk->sk_socket);
65485 seq_printf(m, " / sk %p", sk);
65486 seq_printf(m, " / bo %p", bo);
65487 +#endif
65488 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
65489 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
65490 seq_printf(m, " <<<\n");
65491 diff -urNp linux-2.6.39.4/net/core/datagram.c linux-2.6.39.4/net/core/datagram.c
65492 --- linux-2.6.39.4/net/core/datagram.c 2011-05-19 00:06:34.000000000 -0400
65493 +++ linux-2.6.39.4/net/core/datagram.c 2011-08-05 19:44:37.000000000 -0400
65494 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65495 }
65496
65497 kfree_skb(skb);
65498 - atomic_inc(&sk->sk_drops);
65499 + atomic_inc_unchecked(&sk->sk_drops);
65500 sk_mem_reclaim_partial(sk);
65501
65502 return err;
65503 diff -urNp linux-2.6.39.4/net/core/dev.c linux-2.6.39.4/net/core/dev.c
65504 --- linux-2.6.39.4/net/core/dev.c 2011-06-03 00:04:14.000000000 -0400
65505 +++ linux-2.6.39.4/net/core/dev.c 2011-08-05 20:34:06.000000000 -0400
65506 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65507 if (no_module && capable(CAP_NET_ADMIN))
65508 no_module = request_module("netdev-%s", name);
65509 if (no_module && capable(CAP_SYS_MODULE)) {
65510 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65511 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
65512 +#else
65513 if (!request_module("%s", name))
65514 pr_err("Loading kernel module for a network device "
65515 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65516 "instead\n", name);
65517 +#endif
65518 }
65519 }
65520 EXPORT_SYMBOL(dev_load);
65521 @@ -1951,7 +1955,7 @@ static int illegal_highdma(struct net_de
65522
65523 struct dev_gso_cb {
65524 void (*destructor)(struct sk_buff *skb);
65525 -};
65526 +} __no_const;
65527
65528 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65529
65530 @@ -2901,7 +2905,7 @@ int netif_rx_ni(struct sk_buff *skb)
65531 }
65532 EXPORT_SYMBOL(netif_rx_ni);
65533
65534 -static void net_tx_action(struct softirq_action *h)
65535 +static void net_tx_action(void)
65536 {
65537 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65538
65539 @@ -3765,7 +3769,7 @@ void netif_napi_del(struct napi_struct *
65540 }
65541 EXPORT_SYMBOL(netif_napi_del);
65542
65543 -static void net_rx_action(struct softirq_action *h)
65544 +static void net_rx_action(void)
65545 {
65546 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65547 unsigned long time_limit = jiffies + 2;
65548 diff -urNp linux-2.6.39.4/net/core/flow.c linux-2.6.39.4/net/core/flow.c
65549 --- linux-2.6.39.4/net/core/flow.c 2011-05-19 00:06:34.000000000 -0400
65550 +++ linux-2.6.39.4/net/core/flow.c 2011-08-05 19:44:37.000000000 -0400
65551 @@ -60,7 +60,7 @@ struct flow_cache {
65552 struct timer_list rnd_timer;
65553 };
65554
65555 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
65556 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65557 EXPORT_SYMBOL(flow_cache_genid);
65558 static struct flow_cache flow_cache_global;
65559 static struct kmem_cache *flow_cachep __read_mostly;
65560 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65561
65562 static int flow_entry_valid(struct flow_cache_entry *fle)
65563 {
65564 - if (atomic_read(&flow_cache_genid) != fle->genid)
65565 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65566 return 0;
65567 if (fle->object && !fle->object->ops->check(fle->object))
65568 return 0;
65569 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65570 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65571 fcp->hash_count++;
65572 }
65573 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65574 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65575 flo = fle->object;
65576 if (!flo)
65577 goto ret_object;
65578 @@ -274,7 +274,7 @@ nocache:
65579 }
65580 flo = resolver(net, key, family, dir, flo, ctx);
65581 if (fle) {
65582 - fle->genid = atomic_read(&flow_cache_genid);
65583 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
65584 if (!IS_ERR(flo))
65585 fle->object = flo;
65586 else
65587 diff -urNp linux-2.6.39.4/net/core/rtnetlink.c linux-2.6.39.4/net/core/rtnetlink.c
65588 --- linux-2.6.39.4/net/core/rtnetlink.c 2011-05-19 00:06:34.000000000 -0400
65589 +++ linux-2.6.39.4/net/core/rtnetlink.c 2011-08-05 20:34:06.000000000 -0400
65590 @@ -56,7 +56,7 @@
65591 struct rtnl_link {
65592 rtnl_doit_func doit;
65593 rtnl_dumpit_func dumpit;
65594 -};
65595 +} __no_const;
65596
65597 static DEFINE_MUTEX(rtnl_mutex);
65598
65599 diff -urNp linux-2.6.39.4/net/core/skbuff.c linux-2.6.39.4/net/core/skbuff.c
65600 --- linux-2.6.39.4/net/core/skbuff.c 2011-06-03 00:04:14.000000000 -0400
65601 +++ linux-2.6.39.4/net/core/skbuff.c 2011-08-05 19:44:37.000000000 -0400
65602 @@ -1542,6 +1542,8 @@ int skb_splice_bits(struct sk_buff *skb,
65603 struct sock *sk = skb->sk;
65604 int ret = 0;
65605
65606 + pax_track_stack();
65607 +
65608 if (splice_grow_spd(pipe, &spd))
65609 return -ENOMEM;
65610
65611 diff -urNp linux-2.6.39.4/net/core/sock.c linux-2.6.39.4/net/core/sock.c
65612 --- linux-2.6.39.4/net/core/sock.c 2011-05-19 00:06:34.000000000 -0400
65613 +++ linux-2.6.39.4/net/core/sock.c 2011-08-05 19:44:37.000000000 -0400
65614 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65615 */
65616 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65617 (unsigned)sk->sk_rcvbuf) {
65618 - atomic_inc(&sk->sk_drops);
65619 + atomic_inc_unchecked(&sk->sk_drops);
65620 return -ENOMEM;
65621 }
65622
65623 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65624 return err;
65625
65626 if (!sk_rmem_schedule(sk, skb->truesize)) {
65627 - atomic_inc(&sk->sk_drops);
65628 + atomic_inc_unchecked(&sk->sk_drops);
65629 return -ENOBUFS;
65630 }
65631
65632 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65633 skb_dst_force(skb);
65634
65635 spin_lock_irqsave(&list->lock, flags);
65636 - skb->dropcount = atomic_read(&sk->sk_drops);
65637 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65638 __skb_queue_tail(list, skb);
65639 spin_unlock_irqrestore(&list->lock, flags);
65640
65641 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65642 skb->dev = NULL;
65643
65644 if (sk_rcvqueues_full(sk, skb)) {
65645 - atomic_inc(&sk->sk_drops);
65646 + atomic_inc_unchecked(&sk->sk_drops);
65647 goto discard_and_relse;
65648 }
65649 if (nested)
65650 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65651 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65652 } else if (sk_add_backlog(sk, skb)) {
65653 bh_unlock_sock(sk);
65654 - atomic_inc(&sk->sk_drops);
65655 + atomic_inc_unchecked(&sk->sk_drops);
65656 goto discard_and_relse;
65657 }
65658
65659 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65660 return -ENOTCONN;
65661 if (lv < len)
65662 return -EINVAL;
65663 - if (copy_to_user(optval, address, len))
65664 + if (len > sizeof(address) || copy_to_user(optval, address, len))
65665 return -EFAULT;
65666 goto lenout;
65667 }
65668 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65669
65670 if (len > lv)
65671 len = lv;
65672 - if (copy_to_user(optval, &v, len))
65673 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
65674 return -EFAULT;
65675 lenout:
65676 if (put_user(len, optlen))
65677 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65678 */
65679 smp_wmb();
65680 atomic_set(&sk->sk_refcnt, 1);
65681 - atomic_set(&sk->sk_drops, 0);
65682 + atomic_set_unchecked(&sk->sk_drops, 0);
65683 }
65684 EXPORT_SYMBOL(sock_init_data);
65685
65686 diff -urNp linux-2.6.39.4/net/decnet/sysctl_net_decnet.c linux-2.6.39.4/net/decnet/sysctl_net_decnet.c
65687 --- linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-05-19 00:06:34.000000000 -0400
65688 +++ linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-08-05 19:44:37.000000000 -0400
65689 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65690
65691 if (len > *lenp) len = *lenp;
65692
65693 - if (copy_to_user(buffer, addr, len))
65694 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
65695 return -EFAULT;
65696
65697 *lenp = len;
65698 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65699
65700 if (len > *lenp) len = *lenp;
65701
65702 - if (copy_to_user(buffer, devname, len))
65703 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
65704 return -EFAULT;
65705
65706 *lenp = len;
65707 diff -urNp linux-2.6.39.4/net/econet/Kconfig linux-2.6.39.4/net/econet/Kconfig
65708 --- linux-2.6.39.4/net/econet/Kconfig 2011-05-19 00:06:34.000000000 -0400
65709 +++ linux-2.6.39.4/net/econet/Kconfig 2011-08-05 19:44:37.000000000 -0400
65710 @@ -4,7 +4,7 @@
65711
65712 config ECONET
65713 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65714 - depends on EXPERIMENTAL && INET
65715 + depends on EXPERIMENTAL && INET && BROKEN
65716 ---help---
65717 Econet is a fairly old and slow networking protocol mainly used by
65718 Acorn computers to access file and print servers. It uses native
65719 diff -urNp linux-2.6.39.4/net/ipv4/fib_frontend.c linux-2.6.39.4/net/ipv4/fib_frontend.c
65720 --- linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-05-19 00:06:34.000000000 -0400
65721 +++ linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-08-05 19:44:37.000000000 -0400
65722 @@ -968,12 +968,12 @@ static int fib_inetaddr_event(struct not
65723 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65724 fib_sync_up(dev);
65725 #endif
65726 - atomic_inc(&net->ipv4.dev_addr_genid);
65727 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65728 rt_cache_flush(dev_net(dev), -1);
65729 break;
65730 case NETDEV_DOWN:
65731 fib_del_ifaddr(ifa, NULL);
65732 - atomic_inc(&net->ipv4.dev_addr_genid);
65733 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65734 if (ifa->ifa_dev->ifa_list == NULL) {
65735 /* Last address was deleted from this interface.
65736 * Disable IP.
65737 @@ -1009,7 +1009,7 @@ static int fib_netdev_event(struct notif
65738 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65739 fib_sync_up(dev);
65740 #endif
65741 - atomic_inc(&net->ipv4.dev_addr_genid);
65742 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65743 rt_cache_flush(dev_net(dev), -1);
65744 break;
65745 case NETDEV_DOWN:
65746 diff -urNp linux-2.6.39.4/net/ipv4/fib_semantics.c linux-2.6.39.4/net/ipv4/fib_semantics.c
65747 --- linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-05-19 00:06:34.000000000 -0400
65748 +++ linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-08-05 19:44:37.000000000 -0400
65749 @@ -701,7 +701,7 @@ __be32 fib_info_update_nh_saddr(struct n
65750 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65751 nh->nh_gw,
65752 nh->nh_parent->fib_scope);
65753 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65754 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65755
65756 return nh->nh_saddr;
65757 }
65758 diff -urNp linux-2.6.39.4/net/ipv4/inet_diag.c linux-2.6.39.4/net/ipv4/inet_diag.c
65759 --- linux-2.6.39.4/net/ipv4/inet_diag.c 2011-07-09 09:18:51.000000000 -0400
65760 +++ linux-2.6.39.4/net/ipv4/inet_diag.c 2011-08-05 19:44:37.000000000 -0400
65761 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65762 r->idiag_retrans = 0;
65763
65764 r->id.idiag_if = sk->sk_bound_dev_if;
65765 +
65766 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65767 + r->id.idiag_cookie[0] = 0;
65768 + r->id.idiag_cookie[1] = 0;
65769 +#else
65770 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65771 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65772 +#endif
65773
65774 r->id.idiag_sport = inet->inet_sport;
65775 r->id.idiag_dport = inet->inet_dport;
65776 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65777 r->idiag_family = tw->tw_family;
65778 r->idiag_retrans = 0;
65779 r->id.idiag_if = tw->tw_bound_dev_if;
65780 +
65781 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65782 + r->id.idiag_cookie[0] = 0;
65783 + r->id.idiag_cookie[1] = 0;
65784 +#else
65785 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65786 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65787 +#endif
65788 +
65789 r->id.idiag_sport = tw->tw_sport;
65790 r->id.idiag_dport = tw->tw_dport;
65791 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65792 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65793 if (sk == NULL)
65794 goto unlock;
65795
65796 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65797 err = -ESTALE;
65798 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65799 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65800 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65801 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65802 goto out;
65803 +#endif
65804
65805 err = -ENOMEM;
65806 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65807 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65808 r->idiag_retrans = req->retrans;
65809
65810 r->id.idiag_if = sk->sk_bound_dev_if;
65811 +
65812 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65813 + r->id.idiag_cookie[0] = 0;
65814 + r->id.idiag_cookie[1] = 0;
65815 +#else
65816 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65817 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65818 +#endif
65819
65820 tmo = req->expires - jiffies;
65821 if (tmo < 0)
65822 diff -urNp linux-2.6.39.4/net/ipv4/inet_hashtables.c linux-2.6.39.4/net/ipv4/inet_hashtables.c
65823 --- linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-05-19 00:06:34.000000000 -0400
65824 +++ linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-08-05 19:44:37.000000000 -0400
65825 @@ -18,11 +18,14 @@
65826 #include <linux/sched.h>
65827 #include <linux/slab.h>
65828 #include <linux/wait.h>
65829 +#include <linux/security.h>
65830
65831 #include <net/inet_connection_sock.h>
65832 #include <net/inet_hashtables.h>
65833 #include <net/ip.h>
65834
65835 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65836 +
65837 /*
65838 * Allocate and initialize a new local port bind bucket.
65839 * The bindhash mutex for snum's hash chain must be held here.
65840 @@ -529,6 +532,8 @@ ok:
65841 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65842 spin_unlock(&head->lock);
65843
65844 + gr_update_task_in_ip_table(current, inet_sk(sk));
65845 +
65846 if (tw) {
65847 inet_twsk_deschedule(tw, death_row);
65848 while (twrefcnt) {
65849 diff -urNp linux-2.6.39.4/net/ipv4/inetpeer.c linux-2.6.39.4/net/ipv4/inetpeer.c
65850 --- linux-2.6.39.4/net/ipv4/inetpeer.c 2011-07-09 09:18:51.000000000 -0400
65851 +++ linux-2.6.39.4/net/ipv4/inetpeer.c 2011-08-05 19:44:37.000000000 -0400
65852 @@ -480,6 +480,8 @@ struct inet_peer *inet_getpeer(struct in
65853 unsigned int sequence;
65854 int invalidated, newrefcnt = 0;
65855
65856 + pax_track_stack();
65857 +
65858 /* Look up for the address quickly, lockless.
65859 * Because of a concurrent writer, we might not find an existing entry.
65860 */
65861 @@ -516,8 +518,8 @@ found: /* The existing node has been fo
65862 if (p) {
65863 p->daddr = *daddr;
65864 atomic_set(&p->refcnt, 1);
65865 - atomic_set(&p->rid, 0);
65866 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65867 + atomic_set_unchecked(&p->rid, 0);
65868 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65869 p->tcp_ts_stamp = 0;
65870 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65871 p->rate_tokens = 0;
65872 diff -urNp linux-2.6.39.4/net/ipv4/ip_fragment.c linux-2.6.39.4/net/ipv4/ip_fragment.c
65873 --- linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-05-19 00:06:34.000000000 -0400
65874 +++ linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-08-05 19:44:37.000000000 -0400
65875 @@ -297,7 +297,7 @@ static inline int ip_frag_too_far(struct
65876 return 0;
65877
65878 start = qp->rid;
65879 - end = atomic_inc_return(&peer->rid);
65880 + end = atomic_inc_return_unchecked(&peer->rid);
65881 qp->rid = end;
65882
65883 rc = qp->q.fragments && (end - start) > max;
65884 diff -urNp linux-2.6.39.4/net/ipv4/ip_sockglue.c linux-2.6.39.4/net/ipv4/ip_sockglue.c
65885 --- linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-05-19 00:06:34.000000000 -0400
65886 +++ linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-08-05 19:44:37.000000000 -0400
65887 @@ -1064,6 +1064,8 @@ static int do_ip_getsockopt(struct sock
65888 int val;
65889 int len;
65890
65891 + pax_track_stack();
65892 +
65893 if (level != SOL_IP)
65894 return -EOPNOTSUPP;
65895
65896 diff -urNp linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
65897 --- linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-19 00:06:34.000000000 -0400
65898 +++ linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-05 19:44:37.000000000 -0400
65899 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65900
65901 *len = 0;
65902
65903 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65904 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65905 if (*octets == NULL) {
65906 if (net_ratelimit())
65907 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65908 diff -urNp linux-2.6.39.4/net/ipv4/raw.c linux-2.6.39.4/net/ipv4/raw.c
65909 --- linux-2.6.39.4/net/ipv4/raw.c 2011-05-19 00:06:34.000000000 -0400
65910 +++ linux-2.6.39.4/net/ipv4/raw.c 2011-08-14 11:22:59.000000000 -0400
65911 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65912 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65913 {
65914 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65915 - atomic_inc(&sk->sk_drops);
65916 + atomic_inc_unchecked(&sk->sk_drops);
65917 kfree_skb(skb);
65918 return NET_RX_DROP;
65919 }
65920 @@ -730,16 +730,20 @@ static int raw_init(struct sock *sk)
65921
65922 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65923 {
65924 + struct icmp_filter filter;
65925 +
65926 if (optlen > sizeof(struct icmp_filter))
65927 optlen = sizeof(struct icmp_filter);
65928 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65929 + if (copy_from_user(&filter, optval, optlen))
65930 return -EFAULT;
65931 + raw_sk(sk)->filter = filter;
65932 return 0;
65933 }
65934
65935 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65936 {
65937 int len, ret = -EFAULT;
65938 + struct icmp_filter filter;
65939
65940 if (get_user(len, optlen))
65941 goto out;
65942 @@ -749,8 +753,9 @@ static int raw_geticmpfilter(struct sock
65943 if (len > sizeof(struct icmp_filter))
65944 len = sizeof(struct icmp_filter);
65945 ret = -EFAULT;
65946 - if (put_user(len, optlen) ||
65947 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65948 + filter = raw_sk(sk)->filter;
65949 + if (put_user(len, optlen) || len > sizeof filter ||
65950 + copy_to_user(optval, &filter, len))
65951 goto out;
65952 ret = 0;
65953 out: return ret;
65954 @@ -978,7 +983,13 @@ static void raw_sock_seq_show(struct seq
65955 sk_wmem_alloc_get(sp),
65956 sk_rmem_alloc_get(sp),
65957 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65958 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65959 + atomic_read(&sp->sk_refcnt),
65960 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65961 + NULL,
65962 +#else
65963 + sp,
65964 +#endif
65965 + atomic_read_unchecked(&sp->sk_drops));
65966 }
65967
65968 static int raw_seq_show(struct seq_file *seq, void *v)
65969 diff -urNp linux-2.6.39.4/net/ipv4/route.c linux-2.6.39.4/net/ipv4/route.c
65970 --- linux-2.6.39.4/net/ipv4/route.c 2011-07-09 09:18:51.000000000 -0400
65971 +++ linux-2.6.39.4/net/ipv4/route.c 2011-08-05 19:44:37.000000000 -0400
65972 @@ -303,7 +303,7 @@ static inline unsigned int rt_hash(__be3
65973
65974 static inline int rt_genid(struct net *net)
65975 {
65976 - return atomic_read(&net->ipv4.rt_genid);
65977 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65978 }
65979
65980 #ifdef CONFIG_PROC_FS
65981 @@ -831,7 +831,7 @@ static void rt_cache_invalidate(struct n
65982 unsigned char shuffle;
65983
65984 get_random_bytes(&shuffle, sizeof(shuffle));
65985 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65986 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65987 }
65988
65989 /*
65990 @@ -2833,7 +2833,7 @@ static int rt_fill_info(struct net *net,
65991 rt->peer->pmtu_expires - jiffies : 0;
65992 if (rt->peer) {
65993 inet_peer_refcheck(rt->peer);
65994 - id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
65995 + id = atomic_read_unchecked(&rt->peer->ip_id_count) & 0xffff;
65996 if (rt->peer->tcp_ts_stamp) {
65997 ts = rt->peer->tcp_ts;
65998 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
65999 diff -urNp linux-2.6.39.4/net/ipv4/tcp.c linux-2.6.39.4/net/ipv4/tcp.c
66000 --- linux-2.6.39.4/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
66001 +++ linux-2.6.39.4/net/ipv4/tcp.c 2011-08-05 19:44:37.000000000 -0400
66002 @@ -2121,6 +2121,8 @@ static int do_tcp_setsockopt(struct sock
66003 int val;
66004 int err = 0;
66005
66006 + pax_track_stack();
66007 +
66008 /* These are data/string values, all the others are ints */
66009 switch (optname) {
66010 case TCP_CONGESTION: {
66011 @@ -2500,6 +2502,8 @@ static int do_tcp_getsockopt(struct sock
66012 struct tcp_sock *tp = tcp_sk(sk);
66013 int val, len;
66014
66015 + pax_track_stack();
66016 +
66017 if (get_user(len, optlen))
66018 return -EFAULT;
66019
66020 diff -urNp linux-2.6.39.4/net/ipv4/tcp_ipv4.c linux-2.6.39.4/net/ipv4/tcp_ipv4.c
66021 --- linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-05-19 00:06:34.000000000 -0400
66022 +++ linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-08-05 19:44:37.000000000 -0400
66023 @@ -86,6 +86,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
66024 int sysctl_tcp_low_latency __read_mostly;
66025 EXPORT_SYMBOL(sysctl_tcp_low_latency);
66026
66027 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66028 +extern int grsec_enable_blackhole;
66029 +#endif
66030
66031 #ifdef CONFIG_TCP_MD5SIG
66032 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
66033 @@ -1594,6 +1597,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
66034 return 0;
66035
66036 reset:
66037 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66038 + if (!grsec_enable_blackhole)
66039 +#endif
66040 tcp_v4_send_reset(rsk, skb);
66041 discard:
66042 kfree_skb(skb);
66043 @@ -1656,12 +1662,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
66044 TCP_SKB_CB(skb)->sacked = 0;
66045
66046 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66047 - if (!sk)
66048 + if (!sk) {
66049 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66050 + ret = 1;
66051 +#endif
66052 goto no_tcp_socket;
66053 -
66054 + }
66055 process:
66056 - if (sk->sk_state == TCP_TIME_WAIT)
66057 + if (sk->sk_state == TCP_TIME_WAIT) {
66058 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66059 + ret = 2;
66060 +#endif
66061 goto do_time_wait;
66062 + }
66063
66064 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
66065 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66066 @@ -1711,6 +1724,10 @@ no_tcp_socket:
66067 bad_packet:
66068 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66069 } else {
66070 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66071 + if (!grsec_enable_blackhole || (ret == 1 &&
66072 + (skb->dev->flags & IFF_LOOPBACK)))
66073 +#endif
66074 tcp_v4_send_reset(NULL, skb);
66075 }
66076
66077 @@ -2374,7 +2391,11 @@ static void get_openreq4(struct sock *sk
66078 0, /* non standard timer */
66079 0, /* open_requests have no inode */
66080 atomic_read(&sk->sk_refcnt),
66081 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66082 + NULL,
66083 +#else
66084 req,
66085 +#endif
66086 len);
66087 }
66088
66089 @@ -2424,7 +2445,12 @@ static void get_tcp4_sock(struct sock *s
66090 sock_i_uid(sk),
66091 icsk->icsk_probes_out,
66092 sock_i_ino(sk),
66093 - atomic_read(&sk->sk_refcnt), sk,
66094 + atomic_read(&sk->sk_refcnt),
66095 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66096 + NULL,
66097 +#else
66098 + sk,
66099 +#endif
66100 jiffies_to_clock_t(icsk->icsk_rto),
66101 jiffies_to_clock_t(icsk->icsk_ack.ato),
66102 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
66103 @@ -2452,7 +2478,13 @@ static void get_timewait4_sock(struct in
66104 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
66105 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
66106 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66107 - atomic_read(&tw->tw_refcnt), tw, len);
66108 + atomic_read(&tw->tw_refcnt),
66109 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66110 + NULL,
66111 +#else
66112 + tw,
66113 +#endif
66114 + len);
66115 }
66116
66117 #define TMPSZ 150
66118 diff -urNp linux-2.6.39.4/net/ipv4/tcp_minisocks.c linux-2.6.39.4/net/ipv4/tcp_minisocks.c
66119 --- linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-05-19 00:06:34.000000000 -0400
66120 +++ linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-08-05 19:44:37.000000000 -0400
66121 @@ -27,6 +27,10 @@
66122 #include <net/inet_common.h>
66123 #include <net/xfrm.h>
66124
66125 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66126 +extern int grsec_enable_blackhole;
66127 +#endif
66128 +
66129 int sysctl_tcp_syncookies __read_mostly = 1;
66130 EXPORT_SYMBOL(sysctl_tcp_syncookies);
66131
66132 @@ -745,6 +749,10 @@ listen_overflow:
66133
66134 embryonic_reset:
66135 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
66136 +
66137 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66138 + if (!grsec_enable_blackhole)
66139 +#endif
66140 if (!(flg & TCP_FLAG_RST))
66141 req->rsk_ops->send_reset(sk, skb);
66142
66143 diff -urNp linux-2.6.39.4/net/ipv4/tcp_output.c linux-2.6.39.4/net/ipv4/tcp_output.c
66144 --- linux-2.6.39.4/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
66145 +++ linux-2.6.39.4/net/ipv4/tcp_output.c 2011-08-05 19:44:37.000000000 -0400
66146 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
66147 int mss;
66148 int s_data_desired = 0;
66149
66150 + pax_track_stack();
66151 +
66152 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
66153 s_data_desired = cvp->s_data_desired;
66154 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
66155 diff -urNp linux-2.6.39.4/net/ipv4/tcp_probe.c linux-2.6.39.4/net/ipv4/tcp_probe.c
66156 --- linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-05-19 00:06:34.000000000 -0400
66157 +++ linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-08-05 19:44:37.000000000 -0400
66158 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
66159 if (cnt + width >= len)
66160 break;
66161
66162 - if (copy_to_user(buf + cnt, tbuf, width))
66163 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
66164 return -EFAULT;
66165 cnt += width;
66166 }
66167 diff -urNp linux-2.6.39.4/net/ipv4/tcp_timer.c linux-2.6.39.4/net/ipv4/tcp_timer.c
66168 --- linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-05-19 00:06:34.000000000 -0400
66169 +++ linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-08-05 19:44:37.000000000 -0400
66170 @@ -22,6 +22,10 @@
66171 #include <linux/gfp.h>
66172 #include <net/tcp.h>
66173
66174 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66175 +extern int grsec_lastack_retries;
66176 +#endif
66177 +
66178 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
66179 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
66180 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
66181 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
66182 }
66183 }
66184
66185 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66186 + if ((sk->sk_state == TCP_LAST_ACK) &&
66187 + (grsec_lastack_retries > 0) &&
66188 + (grsec_lastack_retries < retry_until))
66189 + retry_until = grsec_lastack_retries;
66190 +#endif
66191 +
66192 if (retransmits_timed_out(sk, retry_until,
66193 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
66194 /* Has it gone just too far? */
66195 diff -urNp linux-2.6.39.4/net/ipv4/udp.c linux-2.6.39.4/net/ipv4/udp.c
66196 --- linux-2.6.39.4/net/ipv4/udp.c 2011-07-09 09:18:51.000000000 -0400
66197 +++ linux-2.6.39.4/net/ipv4/udp.c 2011-08-05 19:44:37.000000000 -0400
66198 @@ -86,6 +86,7 @@
66199 #include <linux/types.h>
66200 #include <linux/fcntl.h>
66201 #include <linux/module.h>
66202 +#include <linux/security.h>
66203 #include <linux/socket.h>
66204 #include <linux/sockios.h>
66205 #include <linux/igmp.h>
66206 @@ -107,6 +108,10 @@
66207 #include <net/xfrm.h>
66208 #include "udp_impl.h"
66209
66210 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66211 +extern int grsec_enable_blackhole;
66212 +#endif
66213 +
66214 struct udp_table udp_table __read_mostly;
66215 EXPORT_SYMBOL(udp_table);
66216
66217 @@ -564,6 +569,9 @@ found:
66218 return s;
66219 }
66220
66221 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
66222 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
66223 +
66224 /*
66225 * This routine is called by the ICMP module when it gets some
66226 * sort of error condition. If err < 0 then the socket should
66227 @@ -853,9 +861,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
66228 dport = usin->sin_port;
66229 if (dport == 0)
66230 return -EINVAL;
66231 +
66232 + err = gr_search_udp_sendmsg(sk, usin);
66233 + if (err)
66234 + return err;
66235 } else {
66236 if (sk->sk_state != TCP_ESTABLISHED)
66237 return -EDESTADDRREQ;
66238 +
66239 + err = gr_search_udp_sendmsg(sk, NULL);
66240 + if (err)
66241 + return err;
66242 +
66243 daddr = inet->inet_daddr;
66244 dport = inet->inet_dport;
66245 /* Open fast path for connected socket.
66246 @@ -1090,7 +1107,7 @@ static unsigned int first_packet_length(
66247 udp_lib_checksum_complete(skb)) {
66248 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66249 IS_UDPLITE(sk));
66250 - atomic_inc(&sk->sk_drops);
66251 + atomic_inc_unchecked(&sk->sk_drops);
66252 __skb_unlink(skb, rcvq);
66253 __skb_queue_tail(&list_kill, skb);
66254 }
66255 @@ -1176,6 +1193,10 @@ try_again:
66256 if (!skb)
66257 goto out;
66258
66259 + err = gr_search_udp_recvmsg(sk, skb);
66260 + if (err)
66261 + goto out_free;
66262 +
66263 ulen = skb->len - sizeof(struct udphdr);
66264 if (len > ulen)
66265 len = ulen;
66266 @@ -1475,7 +1496,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
66267
66268 drop:
66269 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66270 - atomic_inc(&sk->sk_drops);
66271 + atomic_inc_unchecked(&sk->sk_drops);
66272 kfree_skb(skb);
66273 return -1;
66274 }
66275 @@ -1494,7 +1515,7 @@ static void flush_stack(struct sock **st
66276 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
66277
66278 if (!skb1) {
66279 - atomic_inc(&sk->sk_drops);
66280 + atomic_inc_unchecked(&sk->sk_drops);
66281 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
66282 IS_UDPLITE(sk));
66283 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66284 @@ -1663,6 +1684,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
66285 goto csum_error;
66286
66287 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
66288 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66289 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66290 +#endif
66291 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
66292
66293 /*
66294 @@ -2090,8 +2114,13 @@ static void udp4_format_sock(struct sock
66295 sk_wmem_alloc_get(sp),
66296 sk_rmem_alloc_get(sp),
66297 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
66298 - atomic_read(&sp->sk_refcnt), sp,
66299 - atomic_read(&sp->sk_drops), len);
66300 + atomic_read(&sp->sk_refcnt),
66301 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66302 + NULL,
66303 +#else
66304 + sp,
66305 +#endif
66306 + atomic_read_unchecked(&sp->sk_drops), len);
66307 }
66308
66309 int udp4_seq_show(struct seq_file *seq, void *v)
66310 diff -urNp linux-2.6.39.4/net/ipv6/inet6_connection_sock.c linux-2.6.39.4/net/ipv6/inet6_connection_sock.c
66311 --- linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-05-19 00:06:34.000000000 -0400
66312 +++ linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-08-05 19:44:37.000000000 -0400
66313 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
66314 #ifdef CONFIG_XFRM
66315 {
66316 struct rt6_info *rt = (struct rt6_info *)dst;
66317 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
66318 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
66319 }
66320 #endif
66321 }
66322 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
66323 #ifdef CONFIG_XFRM
66324 if (dst) {
66325 struct rt6_info *rt = (struct rt6_info *)dst;
66326 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
66327 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
66328 __sk_dst_reset(sk);
66329 dst = NULL;
66330 }
66331 diff -urNp linux-2.6.39.4/net/ipv6/ipv6_sockglue.c linux-2.6.39.4/net/ipv6/ipv6_sockglue.c
66332 --- linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-05-19 00:06:34.000000000 -0400
66333 +++ linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-08-05 19:44:37.000000000 -0400
66334 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
66335 int val, valbool;
66336 int retv = -ENOPROTOOPT;
66337
66338 + pax_track_stack();
66339 +
66340 if (optval == NULL)
66341 val=0;
66342 else {
66343 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
66344 int len;
66345 int val;
66346
66347 + pax_track_stack();
66348 +
66349 if (ip6_mroute_opt(optname))
66350 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
66351
66352 diff -urNp linux-2.6.39.4/net/ipv6/raw.c linux-2.6.39.4/net/ipv6/raw.c
66353 --- linux-2.6.39.4/net/ipv6/raw.c 2011-05-19 00:06:34.000000000 -0400
66354 +++ linux-2.6.39.4/net/ipv6/raw.c 2011-08-14 11:25:44.000000000 -0400
66355 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
66356 {
66357 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
66358 skb_checksum_complete(skb)) {
66359 - atomic_inc(&sk->sk_drops);
66360 + atomic_inc_unchecked(&sk->sk_drops);
66361 kfree_skb(skb);
66362 return NET_RX_DROP;
66363 }
66364 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66365 struct raw6_sock *rp = raw6_sk(sk);
66366
66367 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
66368 - atomic_inc(&sk->sk_drops);
66369 + atomic_inc_unchecked(&sk->sk_drops);
66370 kfree_skb(skb);
66371 return NET_RX_DROP;
66372 }
66373 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66374
66375 if (inet->hdrincl) {
66376 if (skb_checksum_complete(skb)) {
66377 - atomic_inc(&sk->sk_drops);
66378 + atomic_inc_unchecked(&sk->sk_drops);
66379 kfree_skb(skb);
66380 return NET_RX_DROP;
66381 }
66382 @@ -601,7 +601,7 @@ out:
66383 return err;
66384 }
66385
66386 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
66387 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66388 struct flowi6 *fl6, struct dst_entry **dstp,
66389 unsigned int flags)
66390 {
66391 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66392 u16 proto;
66393 int err;
66394
66395 + pax_track_stack();
66396 +
66397 /* Rough check on arithmetic overflow,
66398 better check is made in ip6_append_data().
66399 */
66400 @@ -909,12 +911,15 @@ do_confirm:
66401 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66402 char __user *optval, int optlen)
66403 {
66404 + struct icmp6_filter filter;
66405 +
66406 switch (optname) {
66407 case ICMPV6_FILTER:
66408 if (optlen > sizeof(struct icmp6_filter))
66409 optlen = sizeof(struct icmp6_filter);
66410 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66411 + if (copy_from_user(&filter, optval, optlen))
66412 return -EFAULT;
66413 + raw6_sk(sk)->filter = filter;
66414 return 0;
66415 default:
66416 return -ENOPROTOOPT;
66417 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66418 char __user *optval, int __user *optlen)
66419 {
66420 int len;
66421 + struct icmp6_filter filter;
66422
66423 switch (optname) {
66424 case ICMPV6_FILTER:
66425 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66426 len = sizeof(struct icmp6_filter);
66427 if (put_user(len, optlen))
66428 return -EFAULT;
66429 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66430 + filter = raw6_sk(sk)->filter;
66431 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
66432 return -EFAULT;
66433 return 0;
66434 default:
66435 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66436 0, 0L, 0,
66437 sock_i_uid(sp), 0,
66438 sock_i_ino(sp),
66439 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66440 + atomic_read(&sp->sk_refcnt),
66441 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66442 + NULL,
66443 +#else
66444 + sp,
66445 +#endif
66446 + atomic_read_unchecked(&sp->sk_drops));
66447 }
66448
66449 static int raw6_seq_show(struct seq_file *seq, void *v)
66450 diff -urNp linux-2.6.39.4/net/ipv6/tcp_ipv6.c linux-2.6.39.4/net/ipv6/tcp_ipv6.c
66451 --- linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-05-19 00:06:34.000000000 -0400
66452 +++ linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-08-05 19:44:37.000000000 -0400
66453 @@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66454 }
66455 #endif
66456
66457 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66458 +extern int grsec_enable_blackhole;
66459 +#endif
66460 +
66461 static void tcp_v6_hash(struct sock *sk)
66462 {
66463 if (sk->sk_state != TCP_CLOSE) {
66464 @@ -1660,6 +1664,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66465 return 0;
66466
66467 reset:
66468 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66469 + if (!grsec_enable_blackhole)
66470 +#endif
66471 tcp_v6_send_reset(sk, skb);
66472 discard:
66473 if (opt_skb)
66474 @@ -1739,12 +1746,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66475 TCP_SKB_CB(skb)->sacked = 0;
66476
66477 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66478 - if (!sk)
66479 + if (!sk) {
66480 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66481 + ret = 1;
66482 +#endif
66483 goto no_tcp_socket;
66484 + }
66485
66486 process:
66487 - if (sk->sk_state == TCP_TIME_WAIT)
66488 + if (sk->sk_state == TCP_TIME_WAIT) {
66489 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66490 + ret = 2;
66491 +#endif
66492 goto do_time_wait;
66493 + }
66494
66495 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66496 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66497 @@ -1792,6 +1807,10 @@ no_tcp_socket:
66498 bad_packet:
66499 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66500 } else {
66501 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66502 + if (!grsec_enable_blackhole || (ret == 1 &&
66503 + (skb->dev->flags & IFF_LOOPBACK)))
66504 +#endif
66505 tcp_v6_send_reset(NULL, skb);
66506 }
66507
66508 @@ -2052,7 +2071,13 @@ static void get_openreq6(struct seq_file
66509 uid,
66510 0, /* non standard timer */
66511 0, /* open_requests have no inode */
66512 - 0, req);
66513 + 0,
66514 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66515 + NULL
66516 +#else
66517 + req
66518 +#endif
66519 + );
66520 }
66521
66522 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66523 @@ -2102,7 +2127,12 @@ static void get_tcp6_sock(struct seq_fil
66524 sock_i_uid(sp),
66525 icsk->icsk_probes_out,
66526 sock_i_ino(sp),
66527 - atomic_read(&sp->sk_refcnt), sp,
66528 + atomic_read(&sp->sk_refcnt),
66529 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66530 + NULL,
66531 +#else
66532 + sp,
66533 +#endif
66534 jiffies_to_clock_t(icsk->icsk_rto),
66535 jiffies_to_clock_t(icsk->icsk_ack.ato),
66536 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66537 @@ -2137,7 +2167,13 @@ static void get_timewait6_sock(struct se
66538 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66539 tw->tw_substate, 0, 0,
66540 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66541 - atomic_read(&tw->tw_refcnt), tw);
66542 + atomic_read(&tw->tw_refcnt),
66543 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66544 + NULL
66545 +#else
66546 + tw
66547 +#endif
66548 + );
66549 }
66550
66551 static int tcp6_seq_show(struct seq_file *seq, void *v)
66552 diff -urNp linux-2.6.39.4/net/ipv6/udp.c linux-2.6.39.4/net/ipv6/udp.c
66553 --- linux-2.6.39.4/net/ipv6/udp.c 2011-07-09 09:18:51.000000000 -0400
66554 +++ linux-2.6.39.4/net/ipv6/udp.c 2011-08-05 19:44:37.000000000 -0400
66555 @@ -50,6 +50,10 @@
66556 #include <linux/seq_file.h>
66557 #include "udp_impl.h"
66558
66559 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66560 +extern int grsec_enable_blackhole;
66561 +#endif
66562 +
66563 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66564 {
66565 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66566 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66567
66568 return 0;
66569 drop:
66570 - atomic_inc(&sk->sk_drops);
66571 + atomic_inc_unchecked(&sk->sk_drops);
66572 drop_no_sk_drops_inc:
66573 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66574 kfree_skb(skb);
66575 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66576 continue;
66577 }
66578 drop:
66579 - atomic_inc(&sk->sk_drops);
66580 + atomic_inc_unchecked(&sk->sk_drops);
66581 UDP6_INC_STATS_BH(sock_net(sk),
66582 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66583 UDP6_INC_STATS_BH(sock_net(sk),
66584 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66585 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66586 proto == IPPROTO_UDPLITE);
66587
66588 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66589 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66590 +#endif
66591 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66592
66593 kfree_skb(skb);
66594 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66595 if (!sock_owned_by_user(sk))
66596 udpv6_queue_rcv_skb(sk, skb);
66597 else if (sk_add_backlog(sk, skb)) {
66598 - atomic_inc(&sk->sk_drops);
66599 + atomic_inc_unchecked(&sk->sk_drops);
66600 bh_unlock_sock(sk);
66601 sock_put(sk);
66602 goto discard;
66603 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66604 0, 0L, 0,
66605 sock_i_uid(sp), 0,
66606 sock_i_ino(sp),
66607 - atomic_read(&sp->sk_refcnt), sp,
66608 - atomic_read(&sp->sk_drops));
66609 + atomic_read(&sp->sk_refcnt),
66610 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66611 + NULL,
66612 +#else
66613 + sp,
66614 +#endif
66615 + atomic_read_unchecked(&sp->sk_drops));
66616 }
66617
66618 int udp6_seq_show(struct seq_file *seq, void *v)
66619 diff -urNp linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c
66620 --- linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-05-19 00:06:34.000000000 -0400
66621 +++ linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-08-05 19:44:37.000000000 -0400
66622 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
66623 add_wait_queue(&self->open_wait, &wait);
66624
66625 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66626 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66627 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66628
66629 /* As far as I can see, we protect open_count - Jean II */
66630 spin_lock_irqsave(&self->spinlock, flags);
66631 if (!tty_hung_up_p(filp)) {
66632 extra_count = 1;
66633 - self->open_count--;
66634 + local_dec(&self->open_count);
66635 }
66636 spin_unlock_irqrestore(&self->spinlock, flags);
66637 - self->blocked_open++;
66638 + local_inc(&self->blocked_open);
66639
66640 while (1) {
66641 if (tty->termios->c_cflag & CBAUD) {
66642 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
66643 }
66644
66645 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66646 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66647 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66648
66649 schedule();
66650 }
66651 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
66652 if (extra_count) {
66653 /* ++ is not atomic, so this should be protected - Jean II */
66654 spin_lock_irqsave(&self->spinlock, flags);
66655 - self->open_count++;
66656 + local_inc(&self->open_count);
66657 spin_unlock_irqrestore(&self->spinlock, flags);
66658 }
66659 - self->blocked_open--;
66660 + local_dec(&self->blocked_open);
66661
66662 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66663 - __FILE__,__LINE__, tty->driver->name, self->open_count);
66664 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66665
66666 if (!retval)
66667 self->flags |= ASYNC_NORMAL_ACTIVE;
66668 @@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
66669 }
66670 /* ++ is not atomic, so this should be protected - Jean II */
66671 spin_lock_irqsave(&self->spinlock, flags);
66672 - self->open_count++;
66673 + local_inc(&self->open_count);
66674
66675 tty->driver_data = self;
66676 self->tty = tty;
66677 spin_unlock_irqrestore(&self->spinlock, flags);
66678
66679 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66680 - self->line, self->open_count);
66681 + self->line, local_read(&self->open_count));
66682
66683 /* Not really used by us, but lets do it anyway */
66684 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66685 @@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
66686 return;
66687 }
66688
66689 - if ((tty->count == 1) && (self->open_count != 1)) {
66690 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66691 /*
66692 * Uh, oh. tty->count is 1, which means that the tty
66693 * structure will be freed. state->count should always
66694 @@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
66695 */
66696 IRDA_DEBUG(0, "%s(), bad serial port count; "
66697 "tty->count is 1, state->count is %d\n", __func__ ,
66698 - self->open_count);
66699 - self->open_count = 1;
66700 + local_read(&self->open_count));
66701 + local_set(&self->open_count, 1);
66702 }
66703
66704 - if (--self->open_count < 0) {
66705 + if (local_dec_return(&self->open_count) < 0) {
66706 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66707 - __func__, self->line, self->open_count);
66708 - self->open_count = 0;
66709 + __func__, self->line, local_read(&self->open_count));
66710 + local_set(&self->open_count, 0);
66711 }
66712 - if (self->open_count) {
66713 + if (local_read(&self->open_count)) {
66714 spin_unlock_irqrestore(&self->spinlock, flags);
66715
66716 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66717 @@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
66718 tty->closing = 0;
66719 self->tty = NULL;
66720
66721 - if (self->blocked_open) {
66722 + if (local_read(&self->blocked_open)) {
66723 if (self->close_delay)
66724 schedule_timeout_interruptible(self->close_delay);
66725 wake_up_interruptible(&self->open_wait);
66726 @@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
66727 spin_lock_irqsave(&self->spinlock, flags);
66728 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66729 self->tty = NULL;
66730 - self->open_count = 0;
66731 + local_set(&self->open_count, 0);
66732 spin_unlock_irqrestore(&self->spinlock, flags);
66733
66734 wake_up_interruptible(&self->open_wait);
66735 @@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
66736 seq_putc(m, '\n');
66737
66738 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66739 - seq_printf(m, "Open count: %d\n", self->open_count);
66740 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66741 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66742 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66743
66744 diff -urNp linux-2.6.39.4/net/iucv/af_iucv.c linux-2.6.39.4/net/iucv/af_iucv.c
66745 --- linux-2.6.39.4/net/iucv/af_iucv.c 2011-05-19 00:06:34.000000000 -0400
66746 +++ linux-2.6.39.4/net/iucv/af_iucv.c 2011-08-05 19:44:37.000000000 -0400
66747 @@ -653,10 +653,10 @@ static int iucv_sock_autobind(struct soc
66748
66749 write_lock_bh(&iucv_sk_list.lock);
66750
66751 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66752 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66753 while (__iucv_get_sock_by_name(name)) {
66754 sprintf(name, "%08x",
66755 - atomic_inc_return(&iucv_sk_list.autobind_name));
66756 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66757 }
66758
66759 write_unlock_bh(&iucv_sk_list.lock);
66760 diff -urNp linux-2.6.39.4/net/key/af_key.c linux-2.6.39.4/net/key/af_key.c
66761 --- linux-2.6.39.4/net/key/af_key.c 2011-05-19 00:06:34.000000000 -0400
66762 +++ linux-2.6.39.4/net/key/af_key.c 2011-08-05 19:44:37.000000000 -0400
66763 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66764 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66765 struct xfrm_kmaddress k;
66766
66767 + pax_track_stack();
66768 +
66769 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66770 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66771 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66772 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66773 static u32 get_acqseq(void)
66774 {
66775 u32 res;
66776 - static atomic_t acqseq;
66777 + static atomic_unchecked_t acqseq;
66778
66779 do {
66780 - res = atomic_inc_return(&acqseq);
66781 + res = atomic_inc_return_unchecked(&acqseq);
66782 } while (!res);
66783 return res;
66784 }
66785 @@ -3657,7 +3659,11 @@ static int pfkey_seq_show(struct seq_fil
66786 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
66787 else
66788 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
66789 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66790 + NULL,
66791 +#else
66792 s,
66793 +#endif
66794 atomic_read(&s->sk_refcnt),
66795 sk_rmem_alloc_get(s),
66796 sk_wmem_alloc_get(s),
66797 diff -urNp linux-2.6.39.4/net/lapb/lapb_iface.c linux-2.6.39.4/net/lapb/lapb_iface.c
66798 --- linux-2.6.39.4/net/lapb/lapb_iface.c 2011-05-19 00:06:34.000000000 -0400
66799 +++ linux-2.6.39.4/net/lapb/lapb_iface.c 2011-08-05 20:34:06.000000000 -0400
66800 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66801 goto out;
66802
66803 lapb->dev = dev;
66804 - lapb->callbacks = *callbacks;
66805 + lapb->callbacks = callbacks;
66806
66807 __lapb_insert_cb(lapb);
66808
66809 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66810
66811 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66812 {
66813 - if (lapb->callbacks.connect_confirmation)
66814 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66815 + if (lapb->callbacks->connect_confirmation)
66816 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66817 }
66818
66819 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66820 {
66821 - if (lapb->callbacks.connect_indication)
66822 - lapb->callbacks.connect_indication(lapb->dev, reason);
66823 + if (lapb->callbacks->connect_indication)
66824 + lapb->callbacks->connect_indication(lapb->dev, reason);
66825 }
66826
66827 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66828 {
66829 - if (lapb->callbacks.disconnect_confirmation)
66830 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66831 + if (lapb->callbacks->disconnect_confirmation)
66832 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66833 }
66834
66835 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66836 {
66837 - if (lapb->callbacks.disconnect_indication)
66838 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66839 + if (lapb->callbacks->disconnect_indication)
66840 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66841 }
66842
66843 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66844 {
66845 - if (lapb->callbacks.data_indication)
66846 - return lapb->callbacks.data_indication(lapb->dev, skb);
66847 + if (lapb->callbacks->data_indication)
66848 + return lapb->callbacks->data_indication(lapb->dev, skb);
66849
66850 kfree_skb(skb);
66851 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66852 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66853 {
66854 int used = 0;
66855
66856 - if (lapb->callbacks.data_transmit) {
66857 - lapb->callbacks.data_transmit(lapb->dev, skb);
66858 + if (lapb->callbacks->data_transmit) {
66859 + lapb->callbacks->data_transmit(lapb->dev, skb);
66860 used = 1;
66861 }
66862
66863 diff -urNp linux-2.6.39.4/net/mac80211/debugfs_sta.c linux-2.6.39.4/net/mac80211/debugfs_sta.c
66864 --- linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-05-19 00:06:34.000000000 -0400
66865 +++ linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-08-05 19:44:37.000000000 -0400
66866 @@ -115,6 +115,8 @@ static ssize_t sta_agg_status_read(struc
66867 struct tid_ampdu_rx *tid_rx;
66868 struct tid_ampdu_tx *tid_tx;
66869
66870 + pax_track_stack();
66871 +
66872 rcu_read_lock();
66873
66874 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66875 @@ -215,6 +217,8 @@ static ssize_t sta_ht_capa_read(struct f
66876 struct sta_info *sta = file->private_data;
66877 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66878
66879 + pax_track_stack();
66880 +
66881 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66882 htc->ht_supported ? "" : "not ");
66883 if (htc->ht_supported) {
66884 diff -urNp linux-2.6.39.4/net/mac80211/ieee80211_i.h linux-2.6.39.4/net/mac80211/ieee80211_i.h
66885 --- linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-05-19 00:06:34.000000000 -0400
66886 +++ linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-08-05 19:44:37.000000000 -0400
66887 @@ -27,6 +27,7 @@
66888 #include <net/ieee80211_radiotap.h>
66889 #include <net/cfg80211.h>
66890 #include <net/mac80211.h>
66891 +#include <asm/local.h>
66892 #include "key.h"
66893 #include "sta_info.h"
66894
66895 @@ -714,7 +715,7 @@ struct ieee80211_local {
66896 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66897 spinlock_t queue_stop_reason_lock;
66898
66899 - int open_count;
66900 + local_t open_count;
66901 int monitors, cooked_mntrs;
66902 /* number of interfaces with corresponding FIF_ flags */
66903 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66904 diff -urNp linux-2.6.39.4/net/mac80211/iface.c linux-2.6.39.4/net/mac80211/iface.c
66905 --- linux-2.6.39.4/net/mac80211/iface.c 2011-05-19 00:06:34.000000000 -0400
66906 +++ linux-2.6.39.4/net/mac80211/iface.c 2011-08-05 19:44:37.000000000 -0400
66907 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66908 break;
66909 }
66910
66911 - if (local->open_count == 0) {
66912 + if (local_read(&local->open_count) == 0) {
66913 res = drv_start(local);
66914 if (res)
66915 goto err_del_bss;
66916 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66917 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66918
66919 if (!is_valid_ether_addr(dev->dev_addr)) {
66920 - if (!local->open_count)
66921 + if (!local_read(&local->open_count))
66922 drv_stop(local);
66923 return -EADDRNOTAVAIL;
66924 }
66925 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66926 mutex_unlock(&local->mtx);
66927
66928 if (coming_up)
66929 - local->open_count++;
66930 + local_inc(&local->open_count);
66931
66932 if (hw_reconf_flags) {
66933 ieee80211_hw_config(local, hw_reconf_flags);
66934 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66935 err_del_interface:
66936 drv_remove_interface(local, &sdata->vif);
66937 err_stop:
66938 - if (!local->open_count)
66939 + if (!local_read(&local->open_count))
66940 drv_stop(local);
66941 err_del_bss:
66942 sdata->bss = NULL;
66943 @@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
66944 }
66945
66946 if (going_down)
66947 - local->open_count--;
66948 + local_dec(&local->open_count);
66949
66950 switch (sdata->vif.type) {
66951 case NL80211_IFTYPE_AP_VLAN:
66952 @@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
66953
66954 ieee80211_recalc_ps(local, -1);
66955
66956 - if (local->open_count == 0) {
66957 + if (local_read(&local->open_count) == 0) {
66958 if (local->ops->napi_poll)
66959 napi_disable(&local->napi);
66960 ieee80211_clear_tx_pending(local);
66961 diff -urNp linux-2.6.39.4/net/mac80211/main.c linux-2.6.39.4/net/mac80211/main.c
66962 --- linux-2.6.39.4/net/mac80211/main.c 2011-05-19 00:06:34.000000000 -0400
66963 +++ linux-2.6.39.4/net/mac80211/main.c 2011-08-05 19:44:37.000000000 -0400
66964 @@ -215,7 +215,7 @@ int ieee80211_hw_config(struct ieee80211
66965 local->hw.conf.power_level = power;
66966 }
66967
66968 - if (changed && local->open_count) {
66969 + if (changed && local_read(&local->open_count)) {
66970 ret = drv_config(local, changed);
66971 /*
66972 * Goal:
66973 diff -urNp linux-2.6.39.4/net/mac80211/mlme.c linux-2.6.39.4/net/mac80211/mlme.c
66974 --- linux-2.6.39.4/net/mac80211/mlme.c 2011-06-03 00:04:14.000000000 -0400
66975 +++ linux-2.6.39.4/net/mac80211/mlme.c 2011-08-05 19:44:37.000000000 -0400
66976 @@ -1431,6 +1431,8 @@ static bool ieee80211_assoc_success(stru
66977 bool have_higher_than_11mbit = false;
66978 u16 ap_ht_cap_flags;
66979
66980 + pax_track_stack();
66981 +
66982 /* AssocResp and ReassocResp have identical structure */
66983
66984 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66985 diff -urNp linux-2.6.39.4/net/mac80211/pm.c linux-2.6.39.4/net/mac80211/pm.c
66986 --- linux-2.6.39.4/net/mac80211/pm.c 2011-05-19 00:06:34.000000000 -0400
66987 +++ linux-2.6.39.4/net/mac80211/pm.c 2011-08-05 19:44:37.000000000 -0400
66988 @@ -95,7 +95,7 @@ int __ieee80211_suspend(struct ieee80211
66989 }
66990
66991 /* stop hardware - this must stop RX */
66992 - if (local->open_count)
66993 + if (local_read(&local->open_count))
66994 ieee80211_stop_device(local);
66995
66996 local->suspended = true;
66997 diff -urNp linux-2.6.39.4/net/mac80211/rate.c linux-2.6.39.4/net/mac80211/rate.c
66998 --- linux-2.6.39.4/net/mac80211/rate.c 2011-05-19 00:06:34.000000000 -0400
66999 +++ linux-2.6.39.4/net/mac80211/rate.c 2011-08-05 19:44:37.000000000 -0400
67000 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
67001
67002 ASSERT_RTNL();
67003
67004 - if (local->open_count)
67005 + if (local_read(&local->open_count))
67006 return -EBUSY;
67007
67008 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
67009 diff -urNp linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c
67010 --- linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-05-19 00:06:34.000000000 -0400
67011 +++ linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-05 19:44:37.000000000 -0400
67012 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
67013
67014 spin_unlock_irqrestore(&events->lock, status);
67015
67016 - if (copy_to_user(buf, pb, p))
67017 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
67018 return -EFAULT;
67019
67020 return p;
67021 diff -urNp linux-2.6.39.4/net/mac80211/util.c linux-2.6.39.4/net/mac80211/util.c
67022 --- linux-2.6.39.4/net/mac80211/util.c 2011-05-19 00:06:34.000000000 -0400
67023 +++ linux-2.6.39.4/net/mac80211/util.c 2011-08-05 19:44:37.000000000 -0400
67024 @@ -1129,7 +1129,7 @@ int ieee80211_reconfig(struct ieee80211_
67025 local->resuming = true;
67026
67027 /* restart hardware */
67028 - if (local->open_count) {
67029 + if (local_read(&local->open_count)) {
67030 /*
67031 * Upon resume hardware can sometimes be goofy due to
67032 * various platform / driver / bus issues, so restarting
67033 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c
67034 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:18:51.000000000 -0400
67035 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-05 19:44:37.000000000 -0400
67036 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
67037 /* Increase the refcnt counter of the dest */
67038 atomic_inc(&dest->refcnt);
67039
67040 - conn_flags = atomic_read(&dest->conn_flags);
67041 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
67042 if (cp->protocol != IPPROTO_UDP)
67043 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
67044 /* Bind with the destination and its corresponding transmitter */
67045 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
67046 atomic_set(&cp->refcnt, 1);
67047
67048 atomic_set(&cp->n_control, 0);
67049 - atomic_set(&cp->in_pkts, 0);
67050 + atomic_set_unchecked(&cp->in_pkts, 0);
67051
67052 atomic_inc(&ipvs->conn_count);
67053 if (flags & IP_VS_CONN_F_NO_CPORT)
67054 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
67055
67056 /* Don't drop the entry if its number of incoming packets is not
67057 located in [0, 8] */
67058 - i = atomic_read(&cp->in_pkts);
67059 + i = atomic_read_unchecked(&cp->in_pkts);
67060 if (i > 8 || i < 0) return 0;
67061
67062 if (!todrop_rate[i]) return 0;
67063 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c
67064 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:18:51.000000000 -0400
67065 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-05 19:44:37.000000000 -0400
67066 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
67067 ret = cp->packet_xmit(skb, cp, pd->pp);
67068 /* do not touch skb anymore */
67069
67070 - atomic_inc(&cp->in_pkts);
67071 + atomic_inc_unchecked(&cp->in_pkts);
67072 ip_vs_conn_put(cp);
67073 return ret;
67074 }
67075 @@ -1633,7 +1633,7 @@ ip_vs_in(unsigned int hooknum, struct sk
67076 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
67077 pkts = sysctl_sync_threshold(ipvs);
67078 else
67079 - pkts = atomic_add_return(1, &cp->in_pkts);
67080 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67081
67082 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
67083 cp->protocol == IPPROTO_SCTP) {
67084 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c
67085 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-19 00:06:34.000000000 -0400
67086 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-05 19:44:37.000000000 -0400
67087 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
67088 ip_vs_rs_hash(ipvs, dest);
67089 write_unlock_bh(&ipvs->rs_lock);
67090 }
67091 - atomic_set(&dest->conn_flags, conn_flags);
67092 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
67093
67094 /* bind the service */
67095 if (!dest->svc) {
67096 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
67097 " %-7s %-6d %-10d %-10d\n",
67098 &dest->addr.in6,
67099 ntohs(dest->port),
67100 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67101 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67102 atomic_read(&dest->weight),
67103 atomic_read(&dest->activeconns),
67104 atomic_read(&dest->inactconns));
67105 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
67106 "%-7s %-6d %-10d %-10d\n",
67107 ntohl(dest->addr.ip),
67108 ntohs(dest->port),
67109 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67110 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67111 atomic_read(&dest->weight),
67112 atomic_read(&dest->activeconns),
67113 atomic_read(&dest->inactconns));
67114 @@ -2287,6 +2287,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
67115 struct ip_vs_dest_user *udest_compat;
67116 struct ip_vs_dest_user_kern udest;
67117
67118 + pax_track_stack();
67119 +
67120 if (!capable(CAP_NET_ADMIN))
67121 return -EPERM;
67122
67123 @@ -2501,7 +2503,7 @@ __ip_vs_get_dest_entries(struct net *net
67124
67125 entry.addr = dest->addr.ip;
67126 entry.port = dest->port;
67127 - entry.conn_flags = atomic_read(&dest->conn_flags);
67128 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
67129 entry.weight = atomic_read(&dest->weight);
67130 entry.u_threshold = dest->u_threshold;
67131 entry.l_threshold = dest->l_threshold;
67132 @@ -3029,7 +3031,7 @@ static int ip_vs_genl_fill_dest(struct s
67133 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
67134
67135 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
67136 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67137 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67138 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
67139 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
67140 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
67141 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c
67142 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-05-19 00:06:34.000000000 -0400
67143 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-05 19:44:37.000000000 -0400
67144 @@ -648,7 +648,7 @@ control:
67145 * i.e only increment in_pkts for Templates.
67146 */
67147 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
67148 - int pkts = atomic_add_return(1, &cp->in_pkts);
67149 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67150
67151 if (pkts % sysctl_sync_period(ipvs) != 1)
67152 return;
67153 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
67154
67155 if (opt)
67156 memcpy(&cp->in_seq, opt, sizeof(*opt));
67157 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67158 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67159 cp->state = state;
67160 cp->old_state = cp->state;
67161 /*
67162 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c
67163 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-19 00:06:34.000000000 -0400
67164 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-05 19:44:37.000000000 -0400
67165 @@ -1127,7 +1127,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
67166 else
67167 rc = NF_ACCEPT;
67168 /* do not touch skb anymore */
67169 - atomic_inc(&cp->in_pkts);
67170 + atomic_inc_unchecked(&cp->in_pkts);
67171 goto out;
67172 }
67173
67174 @@ -1245,7 +1245,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
67175 else
67176 rc = NF_ACCEPT;
67177 /* do not touch skb anymore */
67178 - atomic_inc(&cp->in_pkts);
67179 + atomic_inc_unchecked(&cp->in_pkts);
67180 goto out;
67181 }
67182
67183 diff -urNp linux-2.6.39.4/net/netfilter/Kconfig linux-2.6.39.4/net/netfilter/Kconfig
67184 --- linux-2.6.39.4/net/netfilter/Kconfig 2011-05-19 00:06:34.000000000 -0400
67185 +++ linux-2.6.39.4/net/netfilter/Kconfig 2011-08-05 19:44:37.000000000 -0400
67186 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
67187
67188 To compile it as a module, choose M here. If unsure, say N.
67189
67190 +config NETFILTER_XT_MATCH_GRADM
67191 + tristate '"gradm" match support'
67192 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
67193 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
67194 + ---help---
67195 + The gradm match allows to match on grsecurity RBAC being enabled.
67196 + It is useful when iptables rules are applied early on bootup to
67197 + prevent connections to the machine (except from a trusted host)
67198 + while the RBAC system is disabled.
67199 +
67200 config NETFILTER_XT_MATCH_HASHLIMIT
67201 tristate '"hashlimit" match support'
67202 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
67203 diff -urNp linux-2.6.39.4/net/netfilter/Makefile linux-2.6.39.4/net/netfilter/Makefile
67204 --- linux-2.6.39.4/net/netfilter/Makefile 2011-05-19 00:06:34.000000000 -0400
67205 +++ linux-2.6.39.4/net/netfilter/Makefile 2011-08-05 19:44:37.000000000 -0400
67206 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
67207 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
67208 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
67209 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
67210 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
67211 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
67212 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
67213 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
67214 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_log.c linux-2.6.39.4/net/netfilter/nfnetlink_log.c
67215 --- linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-05-19 00:06:34.000000000 -0400
67216 +++ linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-08-05 19:44:37.000000000 -0400
67217 @@ -70,7 +70,7 @@ struct nfulnl_instance {
67218 };
67219
67220 static DEFINE_SPINLOCK(instances_lock);
67221 -static atomic_t global_seq;
67222 +static atomic_unchecked_t global_seq;
67223
67224 #define INSTANCE_BUCKETS 16
67225 static struct hlist_head instance_table[INSTANCE_BUCKETS];
67226 @@ -506,7 +506,7 @@ __build_packet_message(struct nfulnl_ins
67227 /* global sequence number */
67228 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
67229 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
67230 - htonl(atomic_inc_return(&global_seq)));
67231 + htonl(atomic_inc_return_unchecked(&global_seq)));
67232
67233 if (data_len) {
67234 struct nlattr *nla;
67235 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_queue.c linux-2.6.39.4/net/netfilter/nfnetlink_queue.c
67236 --- linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-05-19 00:06:34.000000000 -0400
67237 +++ linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-08-05 19:44:37.000000000 -0400
67238 @@ -58,7 +58,7 @@ struct nfqnl_instance {
67239 */
67240 spinlock_t lock;
67241 unsigned int queue_total;
67242 - atomic_t id_sequence; /* 'sequence' of pkt ids */
67243 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
67244 struct list_head queue_list; /* packets in queue */
67245 };
67246
67247 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
67248 nfmsg->version = NFNETLINK_V0;
67249 nfmsg->res_id = htons(queue->queue_num);
67250
67251 - entry->id = atomic_inc_return(&queue->id_sequence);
67252 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
67253 pmsg.packet_id = htonl(entry->id);
67254 pmsg.hw_protocol = entskb->protocol;
67255 pmsg.hook = entry->hook;
67256 @@ -869,7 +869,7 @@ static int seq_show(struct seq_file *s,
67257 inst->peer_pid, inst->queue_total,
67258 inst->copy_mode, inst->copy_range,
67259 inst->queue_dropped, inst->queue_user_dropped,
67260 - atomic_read(&inst->id_sequence), 1);
67261 + atomic_read_unchecked(&inst->id_sequence), 1);
67262 }
67263
67264 static const struct seq_operations nfqnl_seq_ops = {
67265 diff -urNp linux-2.6.39.4/net/netfilter/xt_gradm.c linux-2.6.39.4/net/netfilter/xt_gradm.c
67266 --- linux-2.6.39.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
67267 +++ linux-2.6.39.4/net/netfilter/xt_gradm.c 2011-08-05 19:44:37.000000000 -0400
67268 @@ -0,0 +1,51 @@
67269 +/*
67270 + * gradm match for netfilter
67271 + * Copyright © Zbigniew Krzystolik, 2010
67272 + *
67273 + * This program is free software; you can redistribute it and/or modify
67274 + * it under the terms of the GNU General Public License; either version
67275 + * 2 or 3 as published by the Free Software Foundation.
67276 + */
67277 +#include <linux/module.h>
67278 +#include <linux/moduleparam.h>
67279 +#include <linux/skbuff.h>
67280 +#include <linux/netfilter/x_tables.h>
67281 +#include <linux/grsecurity.h>
67282 +#include <linux/netfilter/xt_gradm.h>
67283 +
67284 +static bool
67285 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
67286 +{
67287 + const struct xt_gradm_mtinfo *info = par->matchinfo;
67288 + bool retval = false;
67289 + if (gr_acl_is_enabled())
67290 + retval = true;
67291 + return retval ^ info->invflags;
67292 +}
67293 +
67294 +static struct xt_match gradm_mt_reg __read_mostly = {
67295 + .name = "gradm",
67296 + .revision = 0,
67297 + .family = NFPROTO_UNSPEC,
67298 + .match = gradm_mt,
67299 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
67300 + .me = THIS_MODULE,
67301 +};
67302 +
67303 +static int __init gradm_mt_init(void)
67304 +{
67305 + return xt_register_match(&gradm_mt_reg);
67306 +}
67307 +
67308 +static void __exit gradm_mt_exit(void)
67309 +{
67310 + xt_unregister_match(&gradm_mt_reg);
67311 +}
67312 +
67313 +module_init(gradm_mt_init);
67314 +module_exit(gradm_mt_exit);
67315 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
67316 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
67317 +MODULE_LICENSE("GPL");
67318 +MODULE_ALIAS("ipt_gradm");
67319 +MODULE_ALIAS("ip6t_gradm");
67320 diff -urNp linux-2.6.39.4/net/netfilter/xt_statistic.c linux-2.6.39.4/net/netfilter/xt_statistic.c
67321 --- linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-05-19 00:06:34.000000000 -0400
67322 +++ linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-08-05 19:44:37.000000000 -0400
67323 @@ -18,7 +18,7 @@
67324 #include <linux/netfilter/x_tables.h>
67325
67326 struct xt_statistic_priv {
67327 - atomic_t count;
67328 + atomic_unchecked_t count;
67329 } ____cacheline_aligned_in_smp;
67330
67331 MODULE_LICENSE("GPL");
67332 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
67333 break;
67334 case XT_STATISTIC_MODE_NTH:
67335 do {
67336 - oval = atomic_read(&info->master->count);
67337 + oval = atomic_read_unchecked(&info->master->count);
67338 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
67339 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
67340 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
67341 if (nval == 0)
67342 ret = !ret;
67343 break;
67344 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
67345 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
67346 if (info->master == NULL)
67347 return -ENOMEM;
67348 - atomic_set(&info->master->count, info->u.nth.count);
67349 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
67350
67351 return 0;
67352 }
67353 diff -urNp linux-2.6.39.4/net/netlink/af_netlink.c linux-2.6.39.4/net/netlink/af_netlink.c
67354 --- linux-2.6.39.4/net/netlink/af_netlink.c 2011-05-19 00:06:34.000000000 -0400
67355 +++ linux-2.6.39.4/net/netlink/af_netlink.c 2011-08-05 19:44:37.000000000 -0400
67356 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
67357 sk->sk_error_report(sk);
67358 }
67359 }
67360 - atomic_inc(&sk->sk_drops);
67361 + atomic_inc_unchecked(&sk->sk_drops);
67362 }
67363
67364 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
67365 @@ -1992,15 +1992,23 @@ static int netlink_seq_show(struct seq_f
67366 struct netlink_sock *nlk = nlk_sk(s);
67367
67368 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
67369 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67370 + NULL,
67371 +#else
67372 s,
67373 +#endif
67374 s->sk_protocol,
67375 nlk->pid,
67376 nlk->groups ? (u32)nlk->groups[0] : 0,
67377 sk_rmem_alloc_get(s),
67378 sk_wmem_alloc_get(s),
67379 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67380 + NULL,
67381 +#else
67382 nlk->cb,
67383 +#endif
67384 atomic_read(&s->sk_refcnt),
67385 - atomic_read(&s->sk_drops),
67386 + atomic_read_unchecked(&s->sk_drops),
67387 sock_i_ino(s)
67388 );
67389
67390 diff -urNp linux-2.6.39.4/net/netrom/af_netrom.c linux-2.6.39.4/net/netrom/af_netrom.c
67391 --- linux-2.6.39.4/net/netrom/af_netrom.c 2011-05-19 00:06:34.000000000 -0400
67392 +++ linux-2.6.39.4/net/netrom/af_netrom.c 2011-08-05 19:44:37.000000000 -0400
67393 @@ -840,6 +840,7 @@ static int nr_getname(struct socket *soc
67394 struct sock *sk = sock->sk;
67395 struct nr_sock *nr = nr_sk(sk);
67396
67397 + memset(sax, 0, sizeof(*sax));
67398 lock_sock(sk);
67399 if (peer != 0) {
67400 if (sk->sk_state != TCP_ESTABLISHED) {
67401 @@ -854,7 +855,6 @@ static int nr_getname(struct socket *soc
67402 *uaddr_len = sizeof(struct full_sockaddr_ax25);
67403 } else {
67404 sax->fsa_ax25.sax25_family = AF_NETROM;
67405 - sax->fsa_ax25.sax25_ndigis = 0;
67406 sax->fsa_ax25.sax25_call = nr->source_addr;
67407 *uaddr_len = sizeof(struct sockaddr_ax25);
67408 }
67409 diff -urNp linux-2.6.39.4/net/packet/af_packet.c linux-2.6.39.4/net/packet/af_packet.c
67410 --- linux-2.6.39.4/net/packet/af_packet.c 2011-07-09 09:18:51.000000000 -0400
67411 +++ linux-2.6.39.4/net/packet/af_packet.c 2011-08-05 19:44:37.000000000 -0400
67412 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
67413
67414 spin_lock(&sk->sk_receive_queue.lock);
67415 po->stats.tp_packets++;
67416 - skb->dropcount = atomic_read(&sk->sk_drops);
67417 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67418 __skb_queue_tail(&sk->sk_receive_queue, skb);
67419 spin_unlock(&sk->sk_receive_queue.lock);
67420 sk->sk_data_ready(sk, skb->len);
67421 return 0;
67422
67423 drop_n_acct:
67424 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
67425 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
67426
67427 drop_n_restore:
67428 if (skb_head != skb->data && skb_shared(skb)) {
67429 @@ -2159,7 +2159,7 @@ static int packet_getsockopt(struct sock
67430 case PACKET_HDRLEN:
67431 if (len > sizeof(int))
67432 len = sizeof(int);
67433 - if (copy_from_user(&val, optval, len))
67434 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
67435 return -EFAULT;
67436 switch (val) {
67437 case TPACKET_V1:
67438 @@ -2197,7 +2197,7 @@ static int packet_getsockopt(struct sock
67439
67440 if (put_user(len, optlen))
67441 return -EFAULT;
67442 - if (copy_to_user(optval, data, len))
67443 + if (len > sizeof(st) || copy_to_user(optval, data, len))
67444 return -EFAULT;
67445 return 0;
67446 }
67447 @@ -2709,7 +2709,11 @@ static int packet_seq_show(struct seq_fi
67448
67449 seq_printf(seq,
67450 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
67451 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67452 + NULL,
67453 +#else
67454 s,
67455 +#endif
67456 atomic_read(&s->sk_refcnt),
67457 s->sk_type,
67458 ntohs(po->num),
67459 diff -urNp linux-2.6.39.4/net/phonet/af_phonet.c linux-2.6.39.4/net/phonet/af_phonet.c
67460 --- linux-2.6.39.4/net/phonet/af_phonet.c 2011-05-19 00:06:34.000000000 -0400
67461 +++ linux-2.6.39.4/net/phonet/af_phonet.c 2011-08-05 20:34:06.000000000 -0400
67462 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
67463 {
67464 struct phonet_protocol *pp;
67465
67466 - if (protocol >= PHONET_NPROTO)
67467 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67468 return NULL;
67469
67470 rcu_read_lock();
67471 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
67472 {
67473 int err = 0;
67474
67475 - if (protocol >= PHONET_NPROTO)
67476 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67477 return -EINVAL;
67478
67479 err = proto_register(pp->prot, 1);
67480 diff -urNp linux-2.6.39.4/net/phonet/pep.c linux-2.6.39.4/net/phonet/pep.c
67481 --- linux-2.6.39.4/net/phonet/pep.c 2011-05-19 00:06:34.000000000 -0400
67482 +++ linux-2.6.39.4/net/phonet/pep.c 2011-08-05 19:44:37.000000000 -0400
67483 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
67484
67485 case PNS_PEP_CTRL_REQ:
67486 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
67487 - atomic_inc(&sk->sk_drops);
67488 + atomic_inc_unchecked(&sk->sk_drops);
67489 break;
67490 }
67491 __skb_pull(skb, 4);
67492 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
67493 }
67494
67495 if (pn->rx_credits == 0) {
67496 - atomic_inc(&sk->sk_drops);
67497 + atomic_inc_unchecked(&sk->sk_drops);
67498 err = -ENOBUFS;
67499 break;
67500 }
67501 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
67502 }
67503
67504 if (pn->rx_credits == 0) {
67505 - atomic_inc(&sk->sk_drops);
67506 + atomic_inc_unchecked(&sk->sk_drops);
67507 err = NET_RX_DROP;
67508 break;
67509 }
67510 diff -urNp linux-2.6.39.4/net/phonet/socket.c linux-2.6.39.4/net/phonet/socket.c
67511 --- linux-2.6.39.4/net/phonet/socket.c 2011-05-19 00:06:34.000000000 -0400
67512 +++ linux-2.6.39.4/net/phonet/socket.c 2011-08-05 19:44:37.000000000 -0400
67513 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_f
67514 pn->resource, sk->sk_state,
67515 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
67516 sock_i_uid(sk), sock_i_ino(sk),
67517 - atomic_read(&sk->sk_refcnt), sk,
67518 - atomic_read(&sk->sk_drops), &len);
67519 + atomic_read(&sk->sk_refcnt),
67520 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67521 + NULL,
67522 +#else
67523 + sk,
67524 +#endif
67525 + atomic_read_unchecked(&sk->sk_drops), &len);
67526 }
67527 seq_printf(seq, "%*s\n", 127 - len, "");
67528 return 0;
67529 diff -urNp linux-2.6.39.4/net/rds/cong.c linux-2.6.39.4/net/rds/cong.c
67530 --- linux-2.6.39.4/net/rds/cong.c 2011-05-19 00:06:34.000000000 -0400
67531 +++ linux-2.6.39.4/net/rds/cong.c 2011-08-05 19:44:37.000000000 -0400
67532 @@ -77,7 +77,7 @@
67533 * finds that the saved generation number is smaller than the global generation
67534 * number, it wakes up the process.
67535 */
67536 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
67537 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
67538
67539 /*
67540 * Congestion monitoring
67541 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
67542 rdsdebug("waking map %p for %pI4\n",
67543 map, &map->m_addr);
67544 rds_stats_inc(s_cong_update_received);
67545 - atomic_inc(&rds_cong_generation);
67546 + atomic_inc_unchecked(&rds_cong_generation);
67547 if (waitqueue_active(&map->m_waitq))
67548 wake_up(&map->m_waitq);
67549 if (waitqueue_active(&rds_poll_waitq))
67550 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
67551
67552 int rds_cong_updated_since(unsigned long *recent)
67553 {
67554 - unsigned long gen = atomic_read(&rds_cong_generation);
67555 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
67556
67557 if (likely(*recent == gen))
67558 return 0;
67559 diff -urNp linux-2.6.39.4/net/rds/ib_cm.c linux-2.6.39.4/net/rds/ib_cm.c
67560 --- linux-2.6.39.4/net/rds/ib_cm.c 2011-05-19 00:06:34.000000000 -0400
67561 +++ linux-2.6.39.4/net/rds/ib_cm.c 2011-08-05 19:44:37.000000000 -0400
67562 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
67563 /* Clear the ACK state */
67564 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67565 #ifdef KERNEL_HAS_ATOMIC64
67566 - atomic64_set(&ic->i_ack_next, 0);
67567 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67568 #else
67569 ic->i_ack_next = 0;
67570 #endif
67571 diff -urNp linux-2.6.39.4/net/rds/ib.h linux-2.6.39.4/net/rds/ib.h
67572 --- linux-2.6.39.4/net/rds/ib.h 2011-05-19 00:06:34.000000000 -0400
67573 +++ linux-2.6.39.4/net/rds/ib.h 2011-08-05 19:44:37.000000000 -0400
67574 @@ -127,7 +127,7 @@ struct rds_ib_connection {
67575 /* sending acks */
67576 unsigned long i_ack_flags;
67577 #ifdef KERNEL_HAS_ATOMIC64
67578 - atomic64_t i_ack_next; /* next ACK to send */
67579 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67580 #else
67581 spinlock_t i_ack_lock; /* protect i_ack_next */
67582 u64 i_ack_next; /* next ACK to send */
67583 diff -urNp linux-2.6.39.4/net/rds/ib_recv.c linux-2.6.39.4/net/rds/ib_recv.c
67584 --- linux-2.6.39.4/net/rds/ib_recv.c 2011-05-19 00:06:34.000000000 -0400
67585 +++ linux-2.6.39.4/net/rds/ib_recv.c 2011-08-05 19:44:37.000000000 -0400
67586 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67587 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
67588 int ack_required)
67589 {
67590 - atomic64_set(&ic->i_ack_next, seq);
67591 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67592 if (ack_required) {
67593 smp_mb__before_clear_bit();
67594 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67595 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67596 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67597 smp_mb__after_clear_bit();
67598
67599 - return atomic64_read(&ic->i_ack_next);
67600 + return atomic64_read_unchecked(&ic->i_ack_next);
67601 }
67602 #endif
67603
67604 diff -urNp linux-2.6.39.4/net/rds/iw_cm.c linux-2.6.39.4/net/rds/iw_cm.c
67605 --- linux-2.6.39.4/net/rds/iw_cm.c 2011-05-19 00:06:34.000000000 -0400
67606 +++ linux-2.6.39.4/net/rds/iw_cm.c 2011-08-05 19:44:37.000000000 -0400
67607 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
67608 /* Clear the ACK state */
67609 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67610 #ifdef KERNEL_HAS_ATOMIC64
67611 - atomic64_set(&ic->i_ack_next, 0);
67612 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67613 #else
67614 ic->i_ack_next = 0;
67615 #endif
67616 diff -urNp linux-2.6.39.4/net/rds/iw.h linux-2.6.39.4/net/rds/iw.h
67617 --- linux-2.6.39.4/net/rds/iw.h 2011-05-19 00:06:34.000000000 -0400
67618 +++ linux-2.6.39.4/net/rds/iw.h 2011-08-05 19:44:37.000000000 -0400
67619 @@ -133,7 +133,7 @@ struct rds_iw_connection {
67620 /* sending acks */
67621 unsigned long i_ack_flags;
67622 #ifdef KERNEL_HAS_ATOMIC64
67623 - atomic64_t i_ack_next; /* next ACK to send */
67624 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67625 #else
67626 spinlock_t i_ack_lock; /* protect i_ack_next */
67627 u64 i_ack_next; /* next ACK to send */
67628 diff -urNp linux-2.6.39.4/net/rds/iw_rdma.c linux-2.6.39.4/net/rds/iw_rdma.c
67629 --- linux-2.6.39.4/net/rds/iw_rdma.c 2011-05-19 00:06:34.000000000 -0400
67630 +++ linux-2.6.39.4/net/rds/iw_rdma.c 2011-08-05 19:44:37.000000000 -0400
67631 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
67632 struct rdma_cm_id *pcm_id;
67633 int rc;
67634
67635 + pax_track_stack();
67636 +
67637 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
67638 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
67639
67640 diff -urNp linux-2.6.39.4/net/rds/iw_recv.c linux-2.6.39.4/net/rds/iw_recv.c
67641 --- linux-2.6.39.4/net/rds/iw_recv.c 2011-05-19 00:06:34.000000000 -0400
67642 +++ linux-2.6.39.4/net/rds/iw_recv.c 2011-08-05 19:44:37.000000000 -0400
67643 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67644 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
67645 int ack_required)
67646 {
67647 - atomic64_set(&ic->i_ack_next, seq);
67648 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67649 if (ack_required) {
67650 smp_mb__before_clear_bit();
67651 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67652 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67653 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67654 smp_mb__after_clear_bit();
67655
67656 - return atomic64_read(&ic->i_ack_next);
67657 + return atomic64_read_unchecked(&ic->i_ack_next);
67658 }
67659 #endif
67660
67661 diff -urNp linux-2.6.39.4/net/rxrpc/af_rxrpc.c linux-2.6.39.4/net/rxrpc/af_rxrpc.c
67662 --- linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-05-19 00:06:34.000000000 -0400
67663 +++ linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-08-05 19:44:37.000000000 -0400
67664 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
67665 __be32 rxrpc_epoch;
67666
67667 /* current debugging ID */
67668 -atomic_t rxrpc_debug_id;
67669 +atomic_unchecked_t rxrpc_debug_id;
67670
67671 /* count of skbs currently in use */
67672 atomic_t rxrpc_n_skbs;
67673 diff -urNp linux-2.6.39.4/net/rxrpc/ar-ack.c linux-2.6.39.4/net/rxrpc/ar-ack.c
67674 --- linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-05-19 00:06:34.000000000 -0400
67675 +++ linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-08-05 19:44:37.000000000 -0400
67676 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
67677
67678 _enter("{%d,%d,%d,%d},",
67679 call->acks_hard, call->acks_unacked,
67680 - atomic_read(&call->sequence),
67681 + atomic_read_unchecked(&call->sequence),
67682 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
67683
67684 stop = 0;
67685 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
67686
67687 /* each Tx packet has a new serial number */
67688 sp->hdr.serial =
67689 - htonl(atomic_inc_return(&call->conn->serial));
67690 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
67691
67692 hdr = (struct rxrpc_header *) txb->head;
67693 hdr->serial = sp->hdr.serial;
67694 @@ -405,7 +405,7 @@ static void rxrpc_rotate_tx_window(struc
67695 */
67696 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
67697 {
67698 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
67699 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
67700 }
67701
67702 /*
67703 @@ -631,7 +631,7 @@ process_further:
67704
67705 latest = ntohl(sp->hdr.serial);
67706 hard = ntohl(ack.firstPacket);
67707 - tx = atomic_read(&call->sequence);
67708 + tx = atomic_read_unchecked(&call->sequence);
67709
67710 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67711 latest,
67712 @@ -844,6 +844,8 @@ void rxrpc_process_call(struct work_stru
67713 u32 abort_code = RX_PROTOCOL_ERROR;
67714 u8 *acks = NULL;
67715
67716 + pax_track_stack();
67717 +
67718 //printk("\n--------------------\n");
67719 _enter("{%d,%s,%lx} [%lu]",
67720 call->debug_id, rxrpc_call_states[call->state], call->events,
67721 @@ -1163,7 +1165,7 @@ void rxrpc_process_call(struct work_stru
67722 goto maybe_reschedule;
67723
67724 send_ACK_with_skew:
67725 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67726 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67727 ntohl(ack.serial));
67728 send_ACK:
67729 mtu = call->conn->trans->peer->if_mtu;
67730 @@ -1175,7 +1177,7 @@ send_ACK:
67731 ackinfo.rxMTU = htonl(5692);
67732 ackinfo.jumbo_max = htonl(4);
67733
67734 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67735 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67736 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67737 ntohl(hdr.serial),
67738 ntohs(ack.maxSkew),
67739 @@ -1193,7 +1195,7 @@ send_ACK:
67740 send_message:
67741 _debug("send message");
67742
67743 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67744 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67745 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67746 send_message_2:
67747
67748 diff -urNp linux-2.6.39.4/net/rxrpc/ar-call.c linux-2.6.39.4/net/rxrpc/ar-call.c
67749 --- linux-2.6.39.4/net/rxrpc/ar-call.c 2011-05-19 00:06:34.000000000 -0400
67750 +++ linux-2.6.39.4/net/rxrpc/ar-call.c 2011-08-05 19:44:37.000000000 -0400
67751 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67752 spin_lock_init(&call->lock);
67753 rwlock_init(&call->state_lock);
67754 atomic_set(&call->usage, 1);
67755 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67756 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67757 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67758
67759 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67760 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connection.c linux-2.6.39.4/net/rxrpc/ar-connection.c
67761 --- linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-05-19 00:06:34.000000000 -0400
67762 +++ linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-08-05 19:44:37.000000000 -0400
67763 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67764 rwlock_init(&conn->lock);
67765 spin_lock_init(&conn->state_lock);
67766 atomic_set(&conn->usage, 1);
67767 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67768 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67769 conn->avail_calls = RXRPC_MAXCALLS;
67770 conn->size_align = 4;
67771 conn->header_size = sizeof(struct rxrpc_header);
67772 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connevent.c linux-2.6.39.4/net/rxrpc/ar-connevent.c
67773 --- linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-05-19 00:06:34.000000000 -0400
67774 +++ linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-08-05 19:44:37.000000000 -0400
67775 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67776
67777 len = iov[0].iov_len + iov[1].iov_len;
67778
67779 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67780 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67781 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67782
67783 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67784 diff -urNp linux-2.6.39.4/net/rxrpc/ar-input.c linux-2.6.39.4/net/rxrpc/ar-input.c
67785 --- linux-2.6.39.4/net/rxrpc/ar-input.c 2011-05-19 00:06:34.000000000 -0400
67786 +++ linux-2.6.39.4/net/rxrpc/ar-input.c 2011-08-05 19:44:37.000000000 -0400
67787 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67788 /* track the latest serial number on this connection for ACK packet
67789 * information */
67790 serial = ntohl(sp->hdr.serial);
67791 - hi_serial = atomic_read(&call->conn->hi_serial);
67792 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67793 while (serial > hi_serial)
67794 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67795 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67796 serial);
67797
67798 /* request ACK generation for any ACK or DATA packet that requests
67799 diff -urNp linux-2.6.39.4/net/rxrpc/ar-internal.h linux-2.6.39.4/net/rxrpc/ar-internal.h
67800 --- linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-05-19 00:06:34.000000000 -0400
67801 +++ linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-08-05 19:44:37.000000000 -0400
67802 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67803 int error; /* error code for local abort */
67804 int debug_id; /* debug ID for printks */
67805 unsigned call_counter; /* call ID counter */
67806 - atomic_t serial; /* packet serial number counter */
67807 - atomic_t hi_serial; /* highest serial number received */
67808 + atomic_unchecked_t serial; /* packet serial number counter */
67809 + atomic_unchecked_t hi_serial; /* highest serial number received */
67810 u8 avail_calls; /* number of calls available */
67811 u8 size_align; /* data size alignment (for security) */
67812 u8 header_size; /* rxrpc + security header size */
67813 @@ -346,7 +346,7 @@ struct rxrpc_call {
67814 spinlock_t lock;
67815 rwlock_t state_lock; /* lock for state transition */
67816 atomic_t usage;
67817 - atomic_t sequence; /* Tx data packet sequence counter */
67818 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67819 u32 abort_code; /* local/remote abort code */
67820 enum { /* current state of call */
67821 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67822 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67823 */
67824 extern atomic_t rxrpc_n_skbs;
67825 extern __be32 rxrpc_epoch;
67826 -extern atomic_t rxrpc_debug_id;
67827 +extern atomic_unchecked_t rxrpc_debug_id;
67828 extern struct workqueue_struct *rxrpc_workqueue;
67829
67830 /*
67831 diff -urNp linux-2.6.39.4/net/rxrpc/ar-local.c linux-2.6.39.4/net/rxrpc/ar-local.c
67832 --- linux-2.6.39.4/net/rxrpc/ar-local.c 2011-05-19 00:06:34.000000000 -0400
67833 +++ linux-2.6.39.4/net/rxrpc/ar-local.c 2011-08-05 19:44:37.000000000 -0400
67834 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67835 spin_lock_init(&local->lock);
67836 rwlock_init(&local->services_lock);
67837 atomic_set(&local->usage, 1);
67838 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67839 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67840 memcpy(&local->srx, srx, sizeof(*srx));
67841 }
67842
67843 diff -urNp linux-2.6.39.4/net/rxrpc/ar-output.c linux-2.6.39.4/net/rxrpc/ar-output.c
67844 --- linux-2.6.39.4/net/rxrpc/ar-output.c 2011-05-19 00:06:34.000000000 -0400
67845 +++ linux-2.6.39.4/net/rxrpc/ar-output.c 2011-08-05 19:44:37.000000000 -0400
67846 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67847 sp->hdr.cid = call->cid;
67848 sp->hdr.callNumber = call->call_id;
67849 sp->hdr.seq =
67850 - htonl(atomic_inc_return(&call->sequence));
67851 + htonl(atomic_inc_return_unchecked(&call->sequence));
67852 sp->hdr.serial =
67853 - htonl(atomic_inc_return(&conn->serial));
67854 + htonl(atomic_inc_return_unchecked(&conn->serial));
67855 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67856 sp->hdr.userStatus = 0;
67857 sp->hdr.securityIndex = conn->security_ix;
67858 diff -urNp linux-2.6.39.4/net/rxrpc/ar-peer.c linux-2.6.39.4/net/rxrpc/ar-peer.c
67859 --- linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-05-19 00:06:34.000000000 -0400
67860 +++ linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-08-05 19:44:37.000000000 -0400
67861 @@ -71,7 +71,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67862 INIT_LIST_HEAD(&peer->error_targets);
67863 spin_lock_init(&peer->lock);
67864 atomic_set(&peer->usage, 1);
67865 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67866 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67867 memcpy(&peer->srx, srx, sizeof(*srx));
67868
67869 rxrpc_assess_MTU_size(peer);
67870 diff -urNp linux-2.6.39.4/net/rxrpc/ar-proc.c linux-2.6.39.4/net/rxrpc/ar-proc.c
67871 --- linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-05-19 00:06:34.000000000 -0400
67872 +++ linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-08-05 19:44:37.000000000 -0400
67873 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67874 atomic_read(&conn->usage),
67875 rxrpc_conn_states[conn->state],
67876 key_serial(conn->key),
67877 - atomic_read(&conn->serial),
67878 - atomic_read(&conn->hi_serial));
67879 + atomic_read_unchecked(&conn->serial),
67880 + atomic_read_unchecked(&conn->hi_serial));
67881
67882 return 0;
67883 }
67884 diff -urNp linux-2.6.39.4/net/rxrpc/ar-transport.c linux-2.6.39.4/net/rxrpc/ar-transport.c
67885 --- linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-05-19 00:06:34.000000000 -0400
67886 +++ linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-08-05 19:44:37.000000000 -0400
67887 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67888 spin_lock_init(&trans->client_lock);
67889 rwlock_init(&trans->conn_lock);
67890 atomic_set(&trans->usage, 1);
67891 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67892 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67893
67894 if (peer->srx.transport.family == AF_INET) {
67895 switch (peer->srx.transport_type) {
67896 diff -urNp linux-2.6.39.4/net/rxrpc/rxkad.c linux-2.6.39.4/net/rxrpc/rxkad.c
67897 --- linux-2.6.39.4/net/rxrpc/rxkad.c 2011-05-19 00:06:34.000000000 -0400
67898 +++ linux-2.6.39.4/net/rxrpc/rxkad.c 2011-08-05 19:44:37.000000000 -0400
67899 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67900 u16 check;
67901 int nsg;
67902
67903 + pax_track_stack();
67904 +
67905 sp = rxrpc_skb(skb);
67906
67907 _enter("");
67908 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67909 u16 check;
67910 int nsg;
67911
67912 + pax_track_stack();
67913 +
67914 _enter("");
67915
67916 sp = rxrpc_skb(skb);
67917 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67918
67919 len = iov[0].iov_len + iov[1].iov_len;
67920
67921 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67922 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67923 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67924
67925 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67926 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67927
67928 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67929
67930 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67931 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67932 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67933
67934 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67935 diff -urNp linux-2.6.39.4/net/sctp/proc.c linux-2.6.39.4/net/sctp/proc.c
67936 --- linux-2.6.39.4/net/sctp/proc.c 2011-05-19 00:06:34.000000000 -0400
67937 +++ linux-2.6.39.4/net/sctp/proc.c 2011-08-05 19:44:37.000000000 -0400
67938 @@ -212,7 +212,12 @@ static int sctp_eps_seq_show(struct seq_
67939 sctp_for_each_hentry(epb, node, &head->chain) {
67940 ep = sctp_ep(epb);
67941 sk = epb->sk;
67942 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
67943 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
67944 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67945 + NULL, NULL,
67946 +#else
67947 + ep, sk,
67948 +#endif
67949 sctp_sk(sk)->type, sk->sk_state, hash,
67950 epb->bind_addr.port,
67951 sock_i_uid(sk), sock_i_ino(sk));
67952 @@ -318,7 +323,12 @@ static int sctp_assocs_seq_show(struct s
67953 seq_printf(seq,
67954 "%8p %8p %-3d %-3d %-2d %-4d "
67955 "%4d %8d %8d %7d %5lu %-5d %5d ",
67956 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67957 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67958 + NULL, NULL,
67959 +#else
67960 + assoc, sk,
67961 +#endif
67962 + sctp_sk(sk)->type, sk->sk_state,
67963 assoc->state, hash,
67964 assoc->assoc_id,
67965 assoc->sndbuf_used,
67966 diff -urNp linux-2.6.39.4/net/sctp/socket.c linux-2.6.39.4/net/sctp/socket.c
67967 --- linux-2.6.39.4/net/sctp/socket.c 2011-05-19 00:06:34.000000000 -0400
67968 +++ linux-2.6.39.4/net/sctp/socket.c 2011-08-05 19:44:37.000000000 -0400
67969 @@ -4433,7 +4433,7 @@ static int sctp_getsockopt_peer_addrs(st
67970 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67971 if (space_left < addrlen)
67972 return -ENOMEM;
67973 - if (copy_to_user(to, &temp, addrlen))
67974 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67975 return -EFAULT;
67976 to += addrlen;
67977 cnt++;
67978 diff -urNp linux-2.6.39.4/net/socket.c linux-2.6.39.4/net/socket.c
67979 --- linux-2.6.39.4/net/socket.c 2011-06-03 00:04:14.000000000 -0400
67980 +++ linux-2.6.39.4/net/socket.c 2011-08-05 19:44:37.000000000 -0400
67981 @@ -88,6 +88,7 @@
67982 #include <linux/nsproxy.h>
67983 #include <linux/magic.h>
67984 #include <linux/slab.h>
67985 +#include <linux/in.h>
67986
67987 #include <asm/uaccess.h>
67988 #include <asm/unistd.h>
67989 @@ -105,6 +106,8 @@
67990 #include <linux/sockios.h>
67991 #include <linux/atalk.h>
67992
67993 +#include <linux/grsock.h>
67994 +
67995 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67996 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67997 unsigned long nr_segs, loff_t pos);
67998 @@ -330,7 +333,7 @@ static struct dentry *sockfs_mount(struc
67999 &sockfs_dentry_operations, SOCKFS_MAGIC);
68000 }
68001
68002 -static struct vfsmount *sock_mnt __read_mostly;
68003 +struct vfsmount *sock_mnt __read_mostly;
68004
68005 static struct file_system_type sock_fs_type = {
68006 .name = "sockfs",
68007 @@ -1179,6 +1182,8 @@ int __sock_create(struct net *net, int f
68008 return -EAFNOSUPPORT;
68009 if (type < 0 || type >= SOCK_MAX)
68010 return -EINVAL;
68011 + if (protocol < 0)
68012 + return -EINVAL;
68013
68014 /* Compatibility.
68015
68016 @@ -1311,6 +1316,16 @@ SYSCALL_DEFINE3(socket, int, family, int
68017 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
68018 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
68019
68020 + if(!gr_search_socket(family, type, protocol)) {
68021 + retval = -EACCES;
68022 + goto out;
68023 + }
68024 +
68025 + if (gr_handle_sock_all(family, type, protocol)) {
68026 + retval = -EACCES;
68027 + goto out;
68028 + }
68029 +
68030 retval = sock_create(family, type, protocol, &sock);
68031 if (retval < 0)
68032 goto out;
68033 @@ -1423,6 +1438,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
68034 if (sock) {
68035 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
68036 if (err >= 0) {
68037 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
68038 + err = -EACCES;
68039 + goto error;
68040 + }
68041 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
68042 + if (err)
68043 + goto error;
68044 +
68045 err = security_socket_bind(sock,
68046 (struct sockaddr *)&address,
68047 addrlen);
68048 @@ -1431,6 +1454,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
68049 (struct sockaddr *)
68050 &address, addrlen);
68051 }
68052 +error:
68053 fput_light(sock->file, fput_needed);
68054 }
68055 return err;
68056 @@ -1454,10 +1478,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
68057 if ((unsigned)backlog > somaxconn)
68058 backlog = somaxconn;
68059
68060 + if (gr_handle_sock_server_other(sock->sk)) {
68061 + err = -EPERM;
68062 + goto error;
68063 + }
68064 +
68065 + err = gr_search_listen(sock);
68066 + if (err)
68067 + goto error;
68068 +
68069 err = security_socket_listen(sock, backlog);
68070 if (!err)
68071 err = sock->ops->listen(sock, backlog);
68072
68073 +error:
68074 fput_light(sock->file, fput_needed);
68075 }
68076 return err;
68077 @@ -1501,6 +1535,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68078 newsock->type = sock->type;
68079 newsock->ops = sock->ops;
68080
68081 + if (gr_handle_sock_server_other(sock->sk)) {
68082 + err = -EPERM;
68083 + sock_release(newsock);
68084 + goto out_put;
68085 + }
68086 +
68087 + err = gr_search_accept(sock);
68088 + if (err) {
68089 + sock_release(newsock);
68090 + goto out_put;
68091 + }
68092 +
68093 /*
68094 * We don't need try_module_get here, as the listening socket (sock)
68095 * has the protocol module (sock->ops->owner) held.
68096 @@ -1539,6 +1585,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68097 fd_install(newfd, newfile);
68098 err = newfd;
68099
68100 + gr_attach_curr_ip(newsock->sk);
68101 +
68102 out_put:
68103 fput_light(sock->file, fput_needed);
68104 out:
68105 @@ -1571,6 +1619,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68106 int, addrlen)
68107 {
68108 struct socket *sock;
68109 + struct sockaddr *sck;
68110 struct sockaddr_storage address;
68111 int err, fput_needed;
68112
68113 @@ -1581,6 +1630,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68114 if (err < 0)
68115 goto out_put;
68116
68117 + sck = (struct sockaddr *)&address;
68118 +
68119 + if (gr_handle_sock_client(sck)) {
68120 + err = -EACCES;
68121 + goto out_put;
68122 + }
68123 +
68124 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
68125 + if (err)
68126 + goto out_put;
68127 +
68128 err =
68129 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
68130 if (err)
68131 @@ -1882,6 +1942,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
68132 int err, ctl_len, iov_size, total_len;
68133 int fput_needed;
68134
68135 + pax_track_stack();
68136 +
68137 err = -EFAULT;
68138 if (MSG_CMSG_COMPAT & flags) {
68139 if (get_compat_msghdr(&msg_sys, msg_compat))
68140 diff -urNp linux-2.6.39.4/net/sunrpc/sched.c linux-2.6.39.4/net/sunrpc/sched.c
68141 --- linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:11:51.000000000 -0400
68142 +++ linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:12:20.000000000 -0400
68143 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
68144 #ifdef RPC_DEBUG
68145 static void rpc_task_set_debuginfo(struct rpc_task *task)
68146 {
68147 - static atomic_t rpc_pid;
68148 + static atomic_unchecked_t rpc_pid;
68149
68150 - task->tk_pid = atomic_inc_return(&rpc_pid);
68151 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
68152 }
68153 #else
68154 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
68155 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c
68156 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-19 00:06:34.000000000 -0400
68157 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-05 19:44:37.000000000 -0400
68158 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
68159 static unsigned int min_max_inline = 4096;
68160 static unsigned int max_max_inline = 65536;
68161
68162 -atomic_t rdma_stat_recv;
68163 -atomic_t rdma_stat_read;
68164 -atomic_t rdma_stat_write;
68165 -atomic_t rdma_stat_sq_starve;
68166 -atomic_t rdma_stat_rq_starve;
68167 -atomic_t rdma_stat_rq_poll;
68168 -atomic_t rdma_stat_rq_prod;
68169 -atomic_t rdma_stat_sq_poll;
68170 -atomic_t rdma_stat_sq_prod;
68171 +atomic_unchecked_t rdma_stat_recv;
68172 +atomic_unchecked_t rdma_stat_read;
68173 +atomic_unchecked_t rdma_stat_write;
68174 +atomic_unchecked_t rdma_stat_sq_starve;
68175 +atomic_unchecked_t rdma_stat_rq_starve;
68176 +atomic_unchecked_t rdma_stat_rq_poll;
68177 +atomic_unchecked_t rdma_stat_rq_prod;
68178 +atomic_unchecked_t rdma_stat_sq_poll;
68179 +atomic_unchecked_t rdma_stat_sq_prod;
68180
68181 /* Temporary NFS request map and context caches */
68182 struct kmem_cache *svc_rdma_map_cachep;
68183 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
68184 len -= *ppos;
68185 if (len > *lenp)
68186 len = *lenp;
68187 - if (len && copy_to_user(buffer, str_buf, len))
68188 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
68189 return -EFAULT;
68190 *lenp = len;
68191 *ppos += len;
68192 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
68193 {
68194 .procname = "rdma_stat_read",
68195 .data = &rdma_stat_read,
68196 - .maxlen = sizeof(atomic_t),
68197 + .maxlen = sizeof(atomic_unchecked_t),
68198 .mode = 0644,
68199 .proc_handler = read_reset_stat,
68200 },
68201 {
68202 .procname = "rdma_stat_recv",
68203 .data = &rdma_stat_recv,
68204 - .maxlen = sizeof(atomic_t),
68205 + .maxlen = sizeof(atomic_unchecked_t),
68206 .mode = 0644,
68207 .proc_handler = read_reset_stat,
68208 },
68209 {
68210 .procname = "rdma_stat_write",
68211 .data = &rdma_stat_write,
68212 - .maxlen = sizeof(atomic_t),
68213 + .maxlen = sizeof(atomic_unchecked_t),
68214 .mode = 0644,
68215 .proc_handler = read_reset_stat,
68216 },
68217 {
68218 .procname = "rdma_stat_sq_starve",
68219 .data = &rdma_stat_sq_starve,
68220 - .maxlen = sizeof(atomic_t),
68221 + .maxlen = sizeof(atomic_unchecked_t),
68222 .mode = 0644,
68223 .proc_handler = read_reset_stat,
68224 },
68225 {
68226 .procname = "rdma_stat_rq_starve",
68227 .data = &rdma_stat_rq_starve,
68228 - .maxlen = sizeof(atomic_t),
68229 + .maxlen = sizeof(atomic_unchecked_t),
68230 .mode = 0644,
68231 .proc_handler = read_reset_stat,
68232 },
68233 {
68234 .procname = "rdma_stat_rq_poll",
68235 .data = &rdma_stat_rq_poll,
68236 - .maxlen = sizeof(atomic_t),
68237 + .maxlen = sizeof(atomic_unchecked_t),
68238 .mode = 0644,
68239 .proc_handler = read_reset_stat,
68240 },
68241 {
68242 .procname = "rdma_stat_rq_prod",
68243 .data = &rdma_stat_rq_prod,
68244 - .maxlen = sizeof(atomic_t),
68245 + .maxlen = sizeof(atomic_unchecked_t),
68246 .mode = 0644,
68247 .proc_handler = read_reset_stat,
68248 },
68249 {
68250 .procname = "rdma_stat_sq_poll",
68251 .data = &rdma_stat_sq_poll,
68252 - .maxlen = sizeof(atomic_t),
68253 + .maxlen = sizeof(atomic_unchecked_t),
68254 .mode = 0644,
68255 .proc_handler = read_reset_stat,
68256 },
68257 {
68258 .procname = "rdma_stat_sq_prod",
68259 .data = &rdma_stat_sq_prod,
68260 - .maxlen = sizeof(atomic_t),
68261 + .maxlen = sizeof(atomic_unchecked_t),
68262 .mode = 0644,
68263 .proc_handler = read_reset_stat,
68264 },
68265 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
68266 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-19 00:06:34.000000000 -0400
68267 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-05 19:44:37.000000000 -0400
68268 @@ -499,7 +499,7 @@ next_sge:
68269 svc_rdma_put_context(ctxt, 0);
68270 goto out;
68271 }
68272 - atomic_inc(&rdma_stat_read);
68273 + atomic_inc_unchecked(&rdma_stat_read);
68274
68275 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
68276 chl_map->ch[ch_no].count -= read_wr.num_sge;
68277 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68278 dto_q);
68279 list_del_init(&ctxt->dto_q);
68280 } else {
68281 - atomic_inc(&rdma_stat_rq_starve);
68282 + atomic_inc_unchecked(&rdma_stat_rq_starve);
68283 clear_bit(XPT_DATA, &xprt->xpt_flags);
68284 ctxt = NULL;
68285 }
68286 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68287 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
68288 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
68289 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
68290 - atomic_inc(&rdma_stat_recv);
68291 + atomic_inc_unchecked(&rdma_stat_recv);
68292
68293 /* Build up the XDR from the receive buffers. */
68294 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
68295 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
68296 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-19 00:06:34.000000000 -0400
68297 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-05 19:44:37.000000000 -0400
68298 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
68299 write_wr.wr.rdma.remote_addr = to;
68300
68301 /* Post It */
68302 - atomic_inc(&rdma_stat_write);
68303 + atomic_inc_unchecked(&rdma_stat_write);
68304 if (svc_rdma_send(xprt, &write_wr))
68305 goto err;
68306 return 0;
68307 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
68308 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-19 00:06:34.000000000 -0400
68309 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-05 19:44:37.000000000 -0400
68310 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
68311 return;
68312
68313 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
68314 - atomic_inc(&rdma_stat_rq_poll);
68315 + atomic_inc_unchecked(&rdma_stat_rq_poll);
68316
68317 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
68318 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
68319 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
68320 }
68321
68322 if (ctxt)
68323 - atomic_inc(&rdma_stat_rq_prod);
68324 + atomic_inc_unchecked(&rdma_stat_rq_prod);
68325
68326 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
68327 /*
68328 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
68329 return;
68330
68331 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
68332 - atomic_inc(&rdma_stat_sq_poll);
68333 + atomic_inc_unchecked(&rdma_stat_sq_poll);
68334 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
68335 if (wc.status != IB_WC_SUCCESS)
68336 /* Close the transport */
68337 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
68338 }
68339
68340 if (ctxt)
68341 - atomic_inc(&rdma_stat_sq_prod);
68342 + atomic_inc_unchecked(&rdma_stat_sq_prod);
68343 }
68344
68345 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
68346 @@ -1271,7 +1271,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
68347 spin_lock_bh(&xprt->sc_lock);
68348 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
68349 spin_unlock_bh(&xprt->sc_lock);
68350 - atomic_inc(&rdma_stat_sq_starve);
68351 + atomic_inc_unchecked(&rdma_stat_sq_starve);
68352
68353 /* See if we can opportunistically reap SQ WR to make room */
68354 sq_cq_reap(xprt);
68355 diff -urNp linux-2.6.39.4/net/sysctl_net.c linux-2.6.39.4/net/sysctl_net.c
68356 --- linux-2.6.39.4/net/sysctl_net.c 2011-05-19 00:06:34.000000000 -0400
68357 +++ linux-2.6.39.4/net/sysctl_net.c 2011-08-05 19:44:37.000000000 -0400
68358 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
68359 struct ctl_table *table)
68360 {
68361 /* Allow network administrator to have same access as root. */
68362 - if (capable(CAP_NET_ADMIN)) {
68363 + if (capable_nolog(CAP_NET_ADMIN)) {
68364 int mode = (table->mode >> 6) & 7;
68365 return (mode << 6) | (mode << 3) | mode;
68366 }
68367 diff -urNp linux-2.6.39.4/net/unix/af_unix.c linux-2.6.39.4/net/unix/af_unix.c
68368 --- linux-2.6.39.4/net/unix/af_unix.c 2011-05-19 00:06:34.000000000 -0400
68369 +++ linux-2.6.39.4/net/unix/af_unix.c 2011-08-05 19:44:37.000000000 -0400
68370 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
68371 err = -ECONNREFUSED;
68372 if (!S_ISSOCK(inode->i_mode))
68373 goto put_fail;
68374 +
68375 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
68376 + err = -EACCES;
68377 + goto put_fail;
68378 + }
68379 +
68380 u = unix_find_socket_byinode(inode);
68381 if (!u)
68382 goto put_fail;
68383 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
68384 if (u) {
68385 struct dentry *dentry;
68386 dentry = unix_sk(u)->dentry;
68387 +
68388 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
68389 + err = -EPERM;
68390 + sock_put(u);
68391 + goto fail;
68392 + }
68393 +
68394 if (dentry)
68395 touch_atime(unix_sk(u)->mnt, dentry);
68396 } else
68397 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
68398 err = security_path_mknod(&nd.path, dentry, mode, 0);
68399 if (err)
68400 goto out_mknod_drop_write;
68401 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
68402 + err = -EACCES;
68403 + goto out_mknod_drop_write;
68404 + }
68405 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
68406 out_mknod_drop_write:
68407 mnt_drop_write(nd.path.mnt);
68408 if (err)
68409 goto out_mknod_dput;
68410 +
68411 + gr_handle_create(dentry, nd.path.mnt);
68412 +
68413 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
68414 dput(nd.path.dentry);
68415 nd.path.dentry = dentry;
68416 @@ -2255,7 +2275,11 @@ static int unix_seq_show(struct seq_file
68417 unix_state_lock(s);
68418
68419 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
68420 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68421 + NULL,
68422 +#else
68423 s,
68424 +#endif
68425 atomic_read(&s->sk_refcnt),
68426 0,
68427 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
68428 diff -urNp linux-2.6.39.4/net/wireless/core.h linux-2.6.39.4/net/wireless/core.h
68429 --- linux-2.6.39.4/net/wireless/core.h 2011-05-19 00:06:34.000000000 -0400
68430 +++ linux-2.6.39.4/net/wireless/core.h 2011-08-05 20:34:06.000000000 -0400
68431 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
68432 struct mutex mtx;
68433
68434 /* rfkill support */
68435 - struct rfkill_ops rfkill_ops;
68436 + rfkill_ops_no_const rfkill_ops;
68437 struct rfkill *rfkill;
68438 struct work_struct rfkill_sync;
68439
68440 diff -urNp linux-2.6.39.4/net/wireless/wext-core.c linux-2.6.39.4/net/wireless/wext-core.c
68441 --- linux-2.6.39.4/net/wireless/wext-core.c 2011-05-19 00:06:34.000000000 -0400
68442 +++ linux-2.6.39.4/net/wireless/wext-core.c 2011-08-05 19:44:37.000000000 -0400
68443 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
68444 */
68445
68446 /* Support for very large requests */
68447 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
68448 - (user_length > descr->max_tokens)) {
68449 + if (user_length > descr->max_tokens) {
68450 /* Allow userspace to GET more than max so
68451 * we can support any size GET requests.
68452 * There is still a limit : -ENOMEM.
68453 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
68454 }
68455 }
68456
68457 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
68458 - /*
68459 - * If this is a GET, but not NOMAX, it means that the extra
68460 - * data is not bounded by userspace, but by max_tokens. Thus
68461 - * set the length to max_tokens. This matches the extra data
68462 - * allocation.
68463 - * The driver should fill it with the number of tokens it
68464 - * provided, and it may check iwp->length rather than having
68465 - * knowledge of max_tokens. If the driver doesn't change the
68466 - * iwp->length, this ioctl just copies back max_token tokens
68467 - * filled with zeroes. Hopefully the driver isn't claiming
68468 - * them to be valid data.
68469 - */
68470 - iwp->length = descr->max_tokens;
68471 - }
68472 -
68473 err = handler(dev, info, (union iwreq_data *) iwp, extra);
68474
68475 iwp->length += essid_compat;
68476 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_policy.c linux-2.6.39.4/net/xfrm/xfrm_policy.c
68477 --- linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-05-19 00:06:34.000000000 -0400
68478 +++ linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-08-05 19:44:37.000000000 -0400
68479 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
68480 {
68481 policy->walk.dead = 1;
68482
68483 - atomic_inc(&policy->genid);
68484 + atomic_inc_unchecked(&policy->genid);
68485
68486 if (del_timer(&policy->timer))
68487 xfrm_pol_put(policy);
68488 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
68489 hlist_add_head(&policy->bydst, chain);
68490 xfrm_pol_hold(policy);
68491 net->xfrm.policy_count[dir]++;
68492 - atomic_inc(&flow_cache_genid);
68493 + atomic_inc_unchecked(&flow_cache_genid);
68494 if (delpol)
68495 __xfrm_policy_unlink(delpol, dir);
68496 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
68497 @@ -1527,7 +1527,7 @@ free_dst:
68498 goto out;
68499 }
68500
68501 -static int inline
68502 +static inline int
68503 xfrm_dst_alloc_copy(void **target, const void *src, int size)
68504 {
68505 if (!*target) {
68506 @@ -1539,7 +1539,7 @@ xfrm_dst_alloc_copy(void **target, const
68507 return 0;
68508 }
68509
68510 -static int inline
68511 +static inline int
68512 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
68513 {
68514 #ifdef CONFIG_XFRM_SUB_POLICY
68515 @@ -1551,7 +1551,7 @@ xfrm_dst_update_parent(struct dst_entry
68516 #endif
68517 }
68518
68519 -static int inline
68520 +static inline int
68521 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
68522 {
68523 #ifdef CONFIG_XFRM_SUB_POLICY
68524 @@ -1645,7 +1645,7 @@ xfrm_resolve_and_create_bundle(struct xf
68525
68526 xdst->num_pols = num_pols;
68527 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
68528 - xdst->policy_genid = atomic_read(&pols[0]->genid);
68529 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
68530
68531 return xdst;
68532 }
68533 @@ -2332,7 +2332,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
68534 if (xdst->xfrm_genid != dst->xfrm->genid)
68535 return 0;
68536 if (xdst->num_pols > 0 &&
68537 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
68538 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
68539 return 0;
68540
68541 mtu = dst_mtu(dst->child);
68542 @@ -2860,7 +2860,7 @@ static int xfrm_policy_migrate(struct xf
68543 sizeof(pol->xfrm_vec[i].saddr));
68544 pol->xfrm_vec[i].encap_family = mp->new_family;
68545 /* flush bundles */
68546 - atomic_inc(&pol->genid);
68547 + atomic_inc_unchecked(&pol->genid);
68548 }
68549 }
68550
68551 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_user.c linux-2.6.39.4/net/xfrm/xfrm_user.c
68552 --- linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-05-19 00:06:34.000000000 -0400
68553 +++ linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-08-05 19:44:37.000000000 -0400
68554 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
68555 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
68556 int i;
68557
68558 + pax_track_stack();
68559 +
68560 if (xp->xfrm_nr == 0)
68561 return 0;
68562
68563 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
68564 int err;
68565 int n = 0;
68566
68567 + pax_track_stack();
68568 +
68569 if (attrs[XFRMA_MIGRATE] == NULL)
68570 return -EINVAL;
68571
68572 diff -urNp linux-2.6.39.4/scripts/basic/fixdep.c linux-2.6.39.4/scripts/basic/fixdep.c
68573 --- linux-2.6.39.4/scripts/basic/fixdep.c 2011-05-19 00:06:34.000000000 -0400
68574 +++ linux-2.6.39.4/scripts/basic/fixdep.c 2011-08-05 19:44:37.000000000 -0400
68575 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
68576
68577 static void parse_config_file(const char *map, size_t len)
68578 {
68579 - const int *end = (const int *) (map + len);
68580 + const unsigned int *end = (const unsigned int *) (map + len);
68581 /* start at +1, so that p can never be < map */
68582 - const int *m = (const int *) map + 1;
68583 + const unsigned int *m = (const unsigned int *) map + 1;
68584 const char *p, *q;
68585
68586 for (; m < end; m++) {
68587 @@ -405,7 +405,7 @@ static void print_deps(void)
68588 static void traps(void)
68589 {
68590 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
68591 - int *p = (int *)test;
68592 + unsigned int *p = (unsigned int *)test;
68593
68594 if (*p != INT_CONF) {
68595 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
68596 diff -urNp linux-2.6.39.4/scripts/gcc-plugin.sh linux-2.6.39.4/scripts/gcc-plugin.sh
68597 --- linux-2.6.39.4/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
68598 +++ linux-2.6.39.4/scripts/gcc-plugin.sh 2011-08-05 20:34:06.000000000 -0400
68599 @@ -0,0 +1,3 @@
68600 +#!/bin/sh
68601 +
68602 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
68603 diff -urNp linux-2.6.39.4/scripts/Makefile.build linux-2.6.39.4/scripts/Makefile.build
68604 --- linux-2.6.39.4/scripts/Makefile.build 2011-05-19 00:06:34.000000000 -0400
68605 +++ linux-2.6.39.4/scripts/Makefile.build 2011-08-05 19:44:37.000000000 -0400
68606 @@ -93,7 +93,7 @@ endif
68607 endif
68608
68609 # Do not include host rules unless needed
68610 -ifneq ($(hostprogs-y)$(hostprogs-m),)
68611 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
68612 include scripts/Makefile.host
68613 endif
68614
68615 diff -urNp linux-2.6.39.4/scripts/Makefile.clean linux-2.6.39.4/scripts/Makefile.clean
68616 --- linux-2.6.39.4/scripts/Makefile.clean 2011-05-19 00:06:34.000000000 -0400
68617 +++ linux-2.6.39.4/scripts/Makefile.clean 2011-08-05 19:44:37.000000000 -0400
68618 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
68619 __clean-files := $(extra-y) $(always) \
68620 $(targets) $(clean-files) \
68621 $(host-progs) \
68622 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
68623 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
68624 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
68625
68626 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
68627
68628 diff -urNp linux-2.6.39.4/scripts/Makefile.host linux-2.6.39.4/scripts/Makefile.host
68629 --- linux-2.6.39.4/scripts/Makefile.host 2011-05-19 00:06:34.000000000 -0400
68630 +++ linux-2.6.39.4/scripts/Makefile.host 2011-08-05 19:44:37.000000000 -0400
68631 @@ -31,6 +31,7 @@
68632 # Note: Shared libraries consisting of C++ files are not supported
68633
68634 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
68635 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
68636
68637 # C code
68638 # Executables compiled from a single .c file
68639 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
68640 # Shared libaries (only .c supported)
68641 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
68642 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
68643 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
68644 # Remove .so files from "xxx-objs"
68645 host-cobjs := $(filter-out %.so,$(host-cobjs))
68646
68647 diff -urNp linux-2.6.39.4/scripts/mod/file2alias.c linux-2.6.39.4/scripts/mod/file2alias.c
68648 --- linux-2.6.39.4/scripts/mod/file2alias.c 2011-05-19 00:06:34.000000000 -0400
68649 +++ linux-2.6.39.4/scripts/mod/file2alias.c 2011-08-05 19:44:37.000000000 -0400
68650 @@ -72,7 +72,7 @@ static void device_id_check(const char *
68651 unsigned long size, unsigned long id_size,
68652 void *symval)
68653 {
68654 - int i;
68655 + unsigned int i;
68656
68657 if (size % id_size || size < id_size) {
68658 if (cross_build != 0)
68659 @@ -102,7 +102,7 @@ static void device_id_check(const char *
68660 /* USB is special because the bcdDevice can be matched against a numeric range */
68661 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
68662 static void do_usb_entry(struct usb_device_id *id,
68663 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
68664 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
68665 unsigned char range_lo, unsigned char range_hi,
68666 unsigned char max, struct module *mod)
68667 {
68668 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
68669 for (i = 0; i < count; i++) {
68670 const char *id = (char *)devs[i].id;
68671 char acpi_id[sizeof(devs[0].id)];
68672 - int j;
68673 + unsigned int j;
68674
68675 buf_printf(&mod->dev_table_buf,
68676 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68677 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
68678
68679 for (j = 0; j < PNP_MAX_DEVICES; j++) {
68680 const char *id = (char *)card->devs[j].id;
68681 - int i2, j2;
68682 + unsigned int i2, j2;
68683 int dup = 0;
68684
68685 if (!id[0])
68686 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
68687 /* add an individual alias for every device entry */
68688 if (!dup) {
68689 char acpi_id[sizeof(card->devs[0].id)];
68690 - int k;
68691 + unsigned int k;
68692
68693 buf_printf(&mod->dev_table_buf,
68694 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68695 @@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co
68696 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
68697 char *alias)
68698 {
68699 - int i, j;
68700 + unsigned int i, j;
68701
68702 sprintf(alias, "dmi*");
68703
68704 diff -urNp linux-2.6.39.4/scripts/mod/modpost.c linux-2.6.39.4/scripts/mod/modpost.c
68705 --- linux-2.6.39.4/scripts/mod/modpost.c 2011-05-19 00:06:34.000000000 -0400
68706 +++ linux-2.6.39.4/scripts/mod/modpost.c 2011-08-05 19:44:37.000000000 -0400
68707 @@ -896,6 +896,7 @@ enum mismatch {
68708 ANY_INIT_TO_ANY_EXIT,
68709 ANY_EXIT_TO_ANY_INIT,
68710 EXPORT_TO_INIT_EXIT,
68711 + DATA_TO_TEXT
68712 };
68713
68714 struct sectioncheck {
68715 @@ -1004,6 +1005,12 @@ const struct sectioncheck sectioncheck[]
68716 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
68717 .mismatch = EXPORT_TO_INIT_EXIT,
68718 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
68719 +},
68720 +/* Do not reference code from writable data */
68721 +{
68722 + .fromsec = { DATA_SECTIONS, NULL },
68723 + .tosec = { TEXT_SECTIONS, NULL },
68724 + .mismatch = DATA_TO_TEXT
68725 }
68726 };
68727
68728 @@ -1126,10 +1133,10 @@ static Elf_Sym *find_elf_symbol(struct e
68729 continue;
68730 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
68731 continue;
68732 - if (sym->st_value == addr)
68733 - return sym;
68734 /* Find a symbol nearby - addr are maybe negative */
68735 d = sym->st_value - addr;
68736 + if (d == 0)
68737 + return sym;
68738 if (d < 0)
68739 d = addr - sym->st_value;
68740 if (d < distance) {
68741 @@ -1408,6 +1415,14 @@ static void report_sec_mismatch(const ch
68742 tosym, prl_to, prl_to, tosym);
68743 free(prl_to);
68744 break;
68745 + case DATA_TO_TEXT:
68746 +/*
68747 + fprintf(stderr,
68748 + "The variable %s references\n"
68749 + "the %s %s%s%s\n",
68750 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68751 +*/
68752 + break;
68753 }
68754 fprintf(stderr, "\n");
68755 }
68756 @@ -1633,7 +1648,7 @@ static void section_rel(const char *modn
68757 static void check_sec_ref(struct module *mod, const char *modname,
68758 struct elf_info *elf)
68759 {
68760 - int i;
68761 + unsigned int i;
68762 Elf_Shdr *sechdrs = elf->sechdrs;
68763
68764 /* Walk through all sections */
68765 @@ -1731,7 +1746,7 @@ void __attribute__((format(printf, 2, 3)
68766 va_end(ap);
68767 }
68768
68769 -void buf_write(struct buffer *buf, const char *s, int len)
68770 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68771 {
68772 if (buf->size - buf->pos < len) {
68773 buf->size += len + SZ;
68774 @@ -1943,7 +1958,7 @@ static void write_if_changed(struct buff
68775 if (fstat(fileno(file), &st) < 0)
68776 goto close_write;
68777
68778 - if (st.st_size != b->pos)
68779 + if (st.st_size != (off_t)b->pos)
68780 goto close_write;
68781
68782 tmp = NOFAIL(malloc(b->pos));
68783 diff -urNp linux-2.6.39.4/scripts/mod/modpost.h linux-2.6.39.4/scripts/mod/modpost.h
68784 --- linux-2.6.39.4/scripts/mod/modpost.h 2011-05-19 00:06:34.000000000 -0400
68785 +++ linux-2.6.39.4/scripts/mod/modpost.h 2011-08-05 19:44:37.000000000 -0400
68786 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68787
68788 struct buffer {
68789 char *p;
68790 - int pos;
68791 - int size;
68792 + unsigned int pos;
68793 + unsigned int size;
68794 };
68795
68796 void __attribute__((format(printf, 2, 3)))
68797 buf_printf(struct buffer *buf, const char *fmt, ...);
68798
68799 void
68800 -buf_write(struct buffer *buf, const char *s, int len);
68801 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68802
68803 struct module {
68804 struct module *next;
68805 diff -urNp linux-2.6.39.4/scripts/mod/sumversion.c linux-2.6.39.4/scripts/mod/sumversion.c
68806 --- linux-2.6.39.4/scripts/mod/sumversion.c 2011-05-19 00:06:34.000000000 -0400
68807 +++ linux-2.6.39.4/scripts/mod/sumversion.c 2011-08-05 19:44:37.000000000 -0400
68808 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68809 goto out;
68810 }
68811
68812 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68813 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68814 warn("writing sum in %s failed: %s\n",
68815 filename, strerror(errno));
68816 goto out;
68817 diff -urNp linux-2.6.39.4/scripts/pnmtologo.c linux-2.6.39.4/scripts/pnmtologo.c
68818 --- linux-2.6.39.4/scripts/pnmtologo.c 2011-05-19 00:06:34.000000000 -0400
68819 +++ linux-2.6.39.4/scripts/pnmtologo.c 2011-08-05 19:44:37.000000000 -0400
68820 @@ -237,14 +237,14 @@ static void write_header(void)
68821 fprintf(out, " * Linux logo %s\n", logoname);
68822 fputs(" */\n\n", out);
68823 fputs("#include <linux/linux_logo.h>\n\n", out);
68824 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68825 + fprintf(out, "static unsigned char %s_data[] = {\n",
68826 logoname);
68827 }
68828
68829 static void write_footer(void)
68830 {
68831 fputs("\n};\n\n", out);
68832 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68833 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68834 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68835 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68836 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68837 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68838 fputs("\n};\n\n", out);
68839
68840 /* write logo clut */
68841 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68842 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68843 logoname);
68844 write_hex_cnt = 0;
68845 for (i = 0; i < logo_clutsize; i++) {
68846 diff -urNp linux-2.6.39.4/security/apparmor/lsm.c linux-2.6.39.4/security/apparmor/lsm.c
68847 --- linux-2.6.39.4/security/apparmor/lsm.c 2011-06-25 12:55:23.000000000 -0400
68848 +++ linux-2.6.39.4/security/apparmor/lsm.c 2011-08-05 20:34:06.000000000 -0400
68849 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68850 return error;
68851 }
68852
68853 -static struct security_operations apparmor_ops = {
68854 +static struct security_operations apparmor_ops __read_only = {
68855 .name = "apparmor",
68856
68857 .ptrace_access_check = apparmor_ptrace_access_check,
68858 diff -urNp linux-2.6.39.4/security/commoncap.c linux-2.6.39.4/security/commoncap.c
68859 --- linux-2.6.39.4/security/commoncap.c 2011-05-19 00:06:34.000000000 -0400
68860 +++ linux-2.6.39.4/security/commoncap.c 2011-08-05 19:44:37.000000000 -0400
68861 @@ -28,6 +28,7 @@
68862 #include <linux/prctl.h>
68863 #include <linux/securebits.h>
68864 #include <linux/user_namespace.h>
68865 +#include <net/sock.h>
68866
68867 /*
68868 * If a non-root user executes a setuid-root binary in
68869 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68870
68871 int cap_netlink_recv(struct sk_buff *skb, int cap)
68872 {
68873 - if (!cap_raised(current_cap(), cap))
68874 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68875 return -EPERM;
68876 return 0;
68877 }
68878 @@ -580,6 +581,9 @@ int cap_bprm_secureexec(struct linux_bin
68879 {
68880 const struct cred *cred = current_cred();
68881
68882 + if (gr_acl_enable_at_secure())
68883 + return 1;
68884 +
68885 if (cred->uid != 0) {
68886 if (bprm->cap_effective)
68887 return 1;
68888 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_api.c linux-2.6.39.4/security/integrity/ima/ima_api.c
68889 --- linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-05-19 00:06:34.000000000 -0400
68890 +++ linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-08-05 19:44:37.000000000 -0400
68891 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68892 int result;
68893
68894 /* can overflow, only indicator */
68895 - atomic_long_inc(&ima_htable.violations);
68896 + atomic_long_inc_unchecked(&ima_htable.violations);
68897
68898 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68899 if (!entry) {
68900 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_fs.c linux-2.6.39.4/security/integrity/ima/ima_fs.c
68901 --- linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-05-19 00:06:34.000000000 -0400
68902 +++ linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-08-05 19:44:37.000000000 -0400
68903 @@ -28,12 +28,12 @@
68904 static int valid_policy = 1;
68905 #define TMPBUFLEN 12
68906 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68907 - loff_t *ppos, atomic_long_t *val)
68908 + loff_t *ppos, atomic_long_unchecked_t *val)
68909 {
68910 char tmpbuf[TMPBUFLEN];
68911 ssize_t len;
68912
68913 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68914 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68915 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68916 }
68917
68918 diff -urNp linux-2.6.39.4/security/integrity/ima/ima.h linux-2.6.39.4/security/integrity/ima/ima.h
68919 --- linux-2.6.39.4/security/integrity/ima/ima.h 2011-05-19 00:06:34.000000000 -0400
68920 +++ linux-2.6.39.4/security/integrity/ima/ima.h 2011-08-05 19:44:37.000000000 -0400
68921 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68922 extern spinlock_t ima_queue_lock;
68923
68924 struct ima_h_table {
68925 - atomic_long_t len; /* number of stored measurements in the list */
68926 - atomic_long_t violations;
68927 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68928 + atomic_long_unchecked_t violations;
68929 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68930 };
68931 extern struct ima_h_table ima_htable;
68932 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_queue.c linux-2.6.39.4/security/integrity/ima/ima_queue.c
68933 --- linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-05-19 00:06:34.000000000 -0400
68934 +++ linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-08-05 19:44:37.000000000 -0400
68935 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68936 INIT_LIST_HEAD(&qe->later);
68937 list_add_tail_rcu(&qe->later, &ima_measurements);
68938
68939 - atomic_long_inc(&ima_htable.len);
68940 + atomic_long_inc_unchecked(&ima_htable.len);
68941 key = ima_hash_key(entry->digest);
68942 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68943 return 0;
68944 diff -urNp linux-2.6.39.4/security/Kconfig linux-2.6.39.4/security/Kconfig
68945 --- linux-2.6.39.4/security/Kconfig 2011-05-19 00:06:34.000000000 -0400
68946 +++ linux-2.6.39.4/security/Kconfig 2011-08-05 19:44:37.000000000 -0400
68947 @@ -4,6 +4,554 @@
68948
68949 menu "Security options"
68950
68951 +source grsecurity/Kconfig
68952 +
68953 +menu "PaX"
68954 +
68955 + config ARCH_TRACK_EXEC_LIMIT
68956 + bool
68957 +
68958 + config PAX_PER_CPU_PGD
68959 + bool
68960 +
68961 + config TASK_SIZE_MAX_SHIFT
68962 + int
68963 + depends on X86_64
68964 + default 47 if !PAX_PER_CPU_PGD
68965 + default 42 if PAX_PER_CPU_PGD
68966 +
68967 + config PAX_ENABLE_PAE
68968 + bool
68969 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68970 +
68971 +config PAX
68972 + bool "Enable various PaX features"
68973 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68974 + help
68975 + This allows you to enable various PaX features. PaX adds
68976 + intrusion prevention mechanisms to the kernel that reduce
68977 + the risks posed by exploitable memory corruption bugs.
68978 +
68979 +menu "PaX Control"
68980 + depends on PAX
68981 +
68982 +config PAX_SOFTMODE
68983 + bool 'Support soft mode'
68984 + select PAX_PT_PAX_FLAGS
68985 + help
68986 + Enabling this option will allow you to run PaX in soft mode, that
68987 + is, PaX features will not be enforced by default, only on executables
68988 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68989 + is the only way to mark executables for soft mode use.
68990 +
68991 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68992 + line option on boot. Furthermore you can control various PaX features
68993 + at runtime via the entries in /proc/sys/kernel/pax.
68994 +
68995 +config PAX_EI_PAX
68996 + bool 'Use legacy ELF header marking'
68997 + help
68998 + Enabling this option will allow you to control PaX features on
68999 + a per executable basis via the 'chpax' utility available at
69000 + http://pax.grsecurity.net/. The control flags will be read from
69001 + an otherwise reserved part of the ELF header. This marking has
69002 + numerous drawbacks (no support for soft-mode, toolchain does not
69003 + know about the non-standard use of the ELF header) therefore it
69004 + has been deprecated in favour of PT_PAX_FLAGS support.
69005 +
69006 + Note that if you enable PT_PAX_FLAGS marking support as well,
69007 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
69008 +
69009 +config PAX_PT_PAX_FLAGS
69010 + bool 'Use ELF program header marking'
69011 + help
69012 + Enabling this option will allow you to control PaX features on
69013 + a per executable basis via the 'paxctl' utility available at
69014 + http://pax.grsecurity.net/. The control flags will be read from
69015 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
69016 + has the benefits of supporting both soft mode and being fully
69017 + integrated into the toolchain (the binutils patch is available
69018 + from http://pax.grsecurity.net).
69019 +
69020 + If your toolchain does not support PT_PAX_FLAGS markings,
69021 + you can create one in most cases with 'paxctl -C'.
69022 +
69023 + Note that if you enable the legacy EI_PAX marking support as well,
69024 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
69025 +
69026 +choice
69027 + prompt 'MAC system integration'
69028 + default PAX_HAVE_ACL_FLAGS
69029 + help
69030 + Mandatory Access Control systems have the option of controlling
69031 + PaX flags on a per executable basis, choose the method supported
69032 + by your particular system.
69033 +
69034 + - "none": if your MAC system does not interact with PaX,
69035 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
69036 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
69037 +
69038 + NOTE: this option is for developers/integrators only.
69039 +
69040 + config PAX_NO_ACL_FLAGS
69041 + bool 'none'
69042 +
69043 + config PAX_HAVE_ACL_FLAGS
69044 + bool 'direct'
69045 +
69046 + config PAX_HOOK_ACL_FLAGS
69047 + bool 'hook'
69048 +endchoice
69049 +
69050 +endmenu
69051 +
69052 +menu "Non-executable pages"
69053 + depends on PAX
69054 +
69055 +config PAX_NOEXEC
69056 + bool "Enforce non-executable pages"
69057 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
69058 + help
69059 + By design some architectures do not allow for protecting memory
69060 + pages against execution or even if they do, Linux does not make
69061 + use of this feature. In practice this means that if a page is
69062 + readable (such as the stack or heap) it is also executable.
69063 +
69064 + There is a well known exploit technique that makes use of this
69065 + fact and a common programming mistake where an attacker can
69066 + introduce code of his choice somewhere in the attacked program's
69067 + memory (typically the stack or the heap) and then execute it.
69068 +
69069 + If the attacked program was running with different (typically
69070 + higher) privileges than that of the attacker, then he can elevate
69071 + his own privilege level (e.g. get a root shell, write to files for
69072 + which he does not have write access to, etc).
69073 +
69074 + Enabling this option will let you choose from various features
69075 + that prevent the injection and execution of 'foreign' code in
69076 + a program.
69077 +
69078 + This will also break programs that rely on the old behaviour and
69079 + expect that dynamically allocated memory via the malloc() family
69080 + of functions is executable (which it is not). Notable examples
69081 + are the XFree86 4.x server, the java runtime and wine.
69082 +
69083 +config PAX_PAGEEXEC
69084 + bool "Paging based non-executable pages"
69085 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
69086 + select S390_SWITCH_AMODE if S390
69087 + select S390_EXEC_PROTECT if S390
69088 + select ARCH_TRACK_EXEC_LIMIT if X86_32
69089 + help
69090 + This implementation is based on the paging feature of the CPU.
69091 + On i386 without hardware non-executable bit support there is a
69092 + variable but usually low performance impact, however on Intel's
69093 + P4 core based CPUs it is very high so you should not enable this
69094 + for kernels meant to be used on such CPUs.
69095 +
69096 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
69097 + with hardware non-executable bit support there is no performance
69098 + impact, on ppc the impact is negligible.
69099 +
69100 + Note that several architectures require various emulations due to
69101 + badly designed userland ABIs, this will cause a performance impact
69102 + but will disappear as soon as userland is fixed. For example, ppc
69103 + userland MUST have been built with secure-plt by a recent toolchain.
69104 +
69105 +config PAX_SEGMEXEC
69106 + bool "Segmentation based non-executable pages"
69107 + depends on PAX_NOEXEC && X86_32
69108 + help
69109 + This implementation is based on the segmentation feature of the
69110 + CPU and has a very small performance impact, however applications
69111 + will be limited to a 1.5 GB address space instead of the normal
69112 + 3 GB.
69113 +
69114 +config PAX_EMUTRAMP
69115 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
69116 + default y if PARISC
69117 + help
69118 + There are some programs and libraries that for one reason or
69119 + another attempt to execute special small code snippets from
69120 + non-executable memory pages. Most notable examples are the
69121 + signal handler return code generated by the kernel itself and
69122 + the GCC trampolines.
69123 +
69124 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
69125 + such programs will no longer work under your kernel.
69126 +
69127 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
69128 + utilities to enable trampoline emulation for the affected programs
69129 + yet still have the protection provided by the non-executable pages.
69130 +
69131 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
69132 + your system will not even boot.
69133 +
69134 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
69135 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
69136 + for the affected files.
69137 +
69138 + NOTE: enabling this feature *may* open up a loophole in the
69139 + protection provided by non-executable pages that an attacker
69140 + could abuse. Therefore the best solution is to not have any
69141 + files on your system that would require this option. This can
69142 + be achieved by not using libc5 (which relies on the kernel
69143 + signal handler return code) and not using or rewriting programs
69144 + that make use of the nested function implementation of GCC.
69145 + Skilled users can just fix GCC itself so that it implements
69146 + nested function calls in a way that does not interfere with PaX.
69147 +
69148 +config PAX_EMUSIGRT
69149 + bool "Automatically emulate sigreturn trampolines"
69150 + depends on PAX_EMUTRAMP && PARISC
69151 + default y
69152 + help
69153 + Enabling this option will have the kernel automatically detect
69154 + and emulate signal return trampolines executing on the stack
69155 + that would otherwise lead to task termination.
69156 +
69157 + This solution is intended as a temporary one for users with
69158 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
69159 + Modula-3 runtime, etc) or executables linked to such, basically
69160 + everything that does not specify its own SA_RESTORER function in
69161 + normal executable memory like glibc 2.1+ does.
69162 +
69163 + On parisc you MUST enable this option, otherwise your system will
69164 + not even boot.
69165 +
69166 + NOTE: this feature cannot be disabled on a per executable basis
69167 + and since it *does* open up a loophole in the protection provided
69168 + by non-executable pages, the best solution is to not have any
69169 + files on your system that would require this option.
69170 +
69171 +config PAX_MPROTECT
69172 + bool "Restrict mprotect()"
69173 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
69174 + help
69175 + Enabling this option will prevent programs from
69176 + - changing the executable status of memory pages that were
69177 + not originally created as executable,
69178 + - making read-only executable pages writable again,
69179 + - creating executable pages from anonymous memory,
69180 + - making read-only-after-relocations (RELRO) data pages writable again.
69181 +
69182 + You should say Y here to complete the protection provided by
69183 + the enforcement of non-executable pages.
69184 +
69185 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69186 + this feature on a per file basis.
69187 +
69188 +config PAX_MPROTECT_COMPAT
69189 + bool "Use legacy/compat protection demoting (read help)"
69190 + depends on PAX_MPROTECT
69191 + default n
69192 + help
69193 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
69194 + by sending the proper error code to the application. For some broken
69195 + userland, this can cause problems with Python or other applications. The
69196 + current implementation however allows for applications like clamav to
69197 + detect if JIT compilation/execution is allowed and to fall back gracefully
69198 + to an interpreter-based mode if it does not. While we encourage everyone
69199 + to use the current implementation as-is and push upstream to fix broken
69200 + userland (note that the RWX logging option can assist with this), in some
69201 + environments this may not be possible. Having to disable MPROTECT
69202 + completely on certain binaries reduces the security benefit of PaX,
69203 + so this option is provided for those environments to revert to the old
69204 + behavior.
69205 +
69206 +config PAX_ELFRELOCS
69207 + bool "Allow ELF text relocations (read help)"
69208 + depends on PAX_MPROTECT
69209 + default n
69210 + help
69211 + Non-executable pages and mprotect() restrictions are effective
69212 + in preventing the introduction of new executable code into an
69213 + attacked task's address space. There remain only two venues
69214 + for this kind of attack: if the attacker can execute already
69215 + existing code in the attacked task then he can either have it
69216 + create and mmap() a file containing his code or have it mmap()
69217 + an already existing ELF library that does not have position
69218 + independent code in it and use mprotect() on it to make it
69219 + writable and copy his code there. While protecting against
69220 + the former approach is beyond PaX, the latter can be prevented
69221 + by having only PIC ELF libraries on one's system (which do not
69222 + need to relocate their code). If you are sure this is your case,
69223 + as is the case with all modern Linux distributions, then leave
69224 + this option disabled. You should say 'n' here.
69225 +
69226 +config PAX_ETEXECRELOCS
69227 + bool "Allow ELF ET_EXEC text relocations"
69228 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
69229 + select PAX_ELFRELOCS
69230 + default y
69231 + help
69232 + On some architectures there are incorrectly created applications
69233 + that require text relocations and would not work without enabling
69234 + this option. If you are an alpha, ia64 or parisc user, you should
69235 + enable this option and disable it once you have made sure that
69236 + none of your applications need it.
69237 +
69238 +config PAX_EMUPLT
69239 + bool "Automatically emulate ELF PLT"
69240 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
69241 + default y
69242 + help
69243 + Enabling this option will have the kernel automatically detect
69244 + and emulate the Procedure Linkage Table entries in ELF files.
69245 + On some architectures such entries are in writable memory, and
69246 + become non-executable leading to task termination. Therefore
69247 + it is mandatory that you enable this option on alpha, parisc,
69248 + sparc and sparc64, otherwise your system would not even boot.
69249 +
69250 + NOTE: this feature *does* open up a loophole in the protection
69251 + provided by the non-executable pages, therefore the proper
69252 + solution is to modify the toolchain to produce a PLT that does
69253 + not need to be writable.
69254 +
69255 +config PAX_DLRESOLVE
69256 + bool 'Emulate old glibc resolver stub'
69257 + depends on PAX_EMUPLT && SPARC
69258 + default n
69259 + help
69260 + This option is needed if userland has an old glibc (before 2.4)
69261 + that puts a 'save' instruction into the runtime generated resolver
69262 + stub that needs special emulation.
69263 +
69264 +config PAX_KERNEXEC
69265 + bool "Enforce non-executable kernel pages"
69266 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
69267 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
69268 + help
69269 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
69270 + that is, enabling this option will make it harder to inject
69271 + and execute 'foreign' code in kernel memory itself.
69272 +
69273 + Note that on x86_64 kernels there is a known regression when
69274 + this feature and KVM/VMX are both enabled in the host kernel.
69275 +
69276 +config PAX_KERNEXEC_MODULE_TEXT
69277 + int "Minimum amount of memory reserved for module code"
69278 + default "4"
69279 + depends on PAX_KERNEXEC && X86_32 && MODULES
69280 + help
69281 + Due to implementation details the kernel must reserve a fixed
69282 + amount of memory for module code at compile time that cannot be
69283 + changed at runtime. Here you can specify the minimum amount
69284 + in MB that will be reserved. Due to the same implementation
69285 + details this size will always be rounded up to the next 2/4 MB
69286 + boundary (depends on PAE) so the actually available memory for
69287 + module code will usually be more than this minimum.
69288 +
69289 + The default 4 MB should be enough for most users but if you have
69290 + an excessive number of modules (e.g., most distribution configs
69291 + compile many drivers as modules) or use huge modules such as
69292 + nvidia's kernel driver, you will need to adjust this amount.
69293 + A good rule of thumb is to look at your currently loaded kernel
69294 + modules and add up their sizes.
69295 +
69296 +endmenu
69297 +
69298 +menu "Address Space Layout Randomization"
69299 + depends on PAX
69300 +
69301 +config PAX_ASLR
69302 + bool "Address Space Layout Randomization"
69303 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
69304 + help
69305 + Many if not most exploit techniques rely on the knowledge of
69306 + certain addresses in the attacked program. The following options
69307 + will allow the kernel to apply a certain amount of randomization
69308 + to specific parts of the program thereby forcing an attacker to
69309 + guess them in most cases. Any failed guess will most likely crash
69310 + the attacked program which allows the kernel to detect such attempts
69311 + and react on them. PaX itself provides no reaction mechanisms,
69312 + instead it is strongly encouraged that you make use of Nergal's
69313 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
69314 + (http://www.grsecurity.net/) built-in crash detection features or
69315 + develop one yourself.
69316 +
69317 + By saying Y here you can choose to randomize the following areas:
69318 + - top of the task's kernel stack
69319 + - top of the task's userland stack
69320 + - base address for mmap() requests that do not specify one
69321 + (this includes all libraries)
69322 + - base address of the main executable
69323 +
69324 + It is strongly recommended to say Y here as address space layout
69325 + randomization has negligible impact on performance yet it provides
69326 + a very effective protection.
69327 +
69328 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69329 + this feature on a per file basis.
69330 +
69331 +config PAX_RANDKSTACK
69332 + bool "Randomize kernel stack base"
69333 + depends on PAX_ASLR && X86_TSC && X86
69334 + help
69335 + By saying Y here the kernel will randomize every task's kernel
69336 + stack on every system call. This will not only force an attacker
69337 + to guess it but also prevent him from making use of possible
69338 + leaked information about it.
69339 +
69340 + Since the kernel stack is a rather scarce resource, randomization
69341 + may cause unexpected stack overflows, therefore you should very
69342 + carefully test your system. Note that once enabled in the kernel
69343 + configuration, this feature cannot be disabled on a per file basis.
69344 +
69345 +config PAX_RANDUSTACK
69346 + bool "Randomize user stack base"
69347 + depends on PAX_ASLR
69348 + help
69349 + By saying Y here the kernel will randomize every task's userland
69350 + stack. The randomization is done in two steps where the second
69351 + one may apply a big amount of shift to the top of the stack and
69352 + cause problems for programs that want to use lots of memory (more
69353 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
69354 + For this reason the second step can be controlled by 'chpax' or
69355 + 'paxctl' on a per file basis.
69356 +
69357 +config PAX_RANDMMAP
69358 + bool "Randomize mmap() base"
69359 + depends on PAX_ASLR
69360 + help
69361 + By saying Y here the kernel will use a randomized base address for
69362 + mmap() requests that do not specify one themselves. As a result
69363 + all dynamically loaded libraries will appear at random addresses
69364 + and therefore be harder to exploit by a technique where an attacker
69365 + attempts to execute library code for his purposes (e.g. spawn a
69366 + shell from an exploited program that is running at an elevated
69367 + privilege level).
69368 +
69369 + Furthermore, if a program is relinked as a dynamic ELF file, its
69370 + base address will be randomized as well, completing the full
69371 + randomization of the address space layout. Attacking such programs
69372 + becomes a guess game. You can find an example of doing this at
69373 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
69374 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
69375 +
69376 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
69377 + feature on a per file basis.
69378 +
69379 +endmenu
69380 +
69381 +menu "Miscellaneous hardening features"
69382 +
69383 +config PAX_MEMORY_SANITIZE
69384 + bool "Sanitize all freed memory"
69385 + help
69386 + By saying Y here the kernel will erase memory pages as soon as they
69387 + are freed. This in turn reduces the lifetime of data stored in the
69388 + pages, making it less likely that sensitive information such as
69389 + passwords, cryptographic secrets, etc stay in memory for too long.
69390 +
69391 + This is especially useful for programs whose runtime is short, long
69392 + lived processes and the kernel itself benefit from this as long as
69393 + they operate on whole memory pages and ensure timely freeing of pages
69394 + that may hold sensitive information.
69395 +
69396 + The tradeoff is performance impact, on a single CPU system kernel
69397 + compilation sees a 3% slowdown, other systems and workloads may vary
69398 + and you are advised to test this feature on your expected workload
69399 + before deploying it.
69400 +
69401 + Note that this feature does not protect data stored in live pages,
69402 + e.g., process memory swapped to disk may stay there for a long time.
69403 +
69404 +config PAX_MEMORY_STACKLEAK
69405 + bool "Sanitize kernel stack"
69406 + depends on X86
69407 + help
69408 + By saying Y here the kernel will erase the kernel stack before it
69409 + returns from a system call. This in turn reduces the information
69410 + that a kernel stack leak bug can reveal.
69411 +
69412 + Note that such a bug can still leak information that was put on
69413 + the stack by the current system call (the one eventually triggering
69414 + the bug) but traces of earlier system calls on the kernel stack
69415 + cannot leak anymore.
69416 +
69417 + The tradeoff is performance impact: on a single CPU system kernel
69418 + compilation sees a 1% slowdown, other systems and workloads may vary
69419 + and you are advised to test this feature on your expected workload
69420 + before deploying it.
69421 +
69422 + Note: full support for this feature requires gcc with plugin support
69423 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
69424 + is not supported). Using older gcc versions means that functions
69425 + with large enough stack frames may leave uninitialized memory behind
69426 + that may be exposed to a later syscall leaking the stack.
69427 +
69428 +config PAX_MEMORY_UDEREF
69429 + bool "Prevent invalid userland pointer dereference"
69430 + depends on X86 && !UML_X86 && !XEN
69431 + select PAX_PER_CPU_PGD if X86_64
69432 + help
69433 + By saying Y here the kernel will be prevented from dereferencing
69434 + userland pointers in contexts where the kernel expects only kernel
69435 + pointers. This is both a useful runtime debugging feature and a
69436 + security measure that prevents exploiting a class of kernel bugs.
69437 +
69438 + The tradeoff is that some virtualization solutions may experience
69439 + a huge slowdown and therefore you should not enable this feature
69440 + for kernels meant to run in such environments. Whether a given VM
69441 + solution is affected or not is best determined by simply trying it
69442 + out, the performance impact will be obvious right on boot as this
69443 + mechanism engages from very early on. A good rule of thumb is that
69444 + VMs running on CPUs without hardware virtualization support (i.e.,
69445 + the majority of IA-32 CPUs) will likely experience the slowdown.
69446 +
69447 +config PAX_REFCOUNT
69448 + bool "Prevent various kernel object reference counter overflows"
69449 + depends on GRKERNSEC && (X86 || SPARC64)
69450 + help
69451 + By saying Y here the kernel will detect and prevent overflowing
69452 + various (but not all) kinds of object reference counters. Such
69453 + overflows can normally occur due to bugs only and are often, if
69454 + not always, exploitable.
69455 +
69456 + The tradeoff is that data structures protected by an overflowed
69457 + refcount will never be freed and therefore will leak memory. Note
69458 + that this leak also happens even without this protection but in
69459 + that case the overflow can eventually trigger the freeing of the
69460 + data structure while it is still being used elsewhere, resulting
69461 + in the exploitable situation that this feature prevents.
69462 +
69463 + Since this has a negligible performance impact, you should enable
69464 + this feature.
69465 +
69466 +config PAX_USERCOPY
69467 + bool "Harden heap object copies between kernel and userland"
69468 + depends on X86 || PPC || SPARC || ARM
69469 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
69470 + help
69471 + By saying Y here the kernel will enforce the size of heap objects
69472 + when they are copied in either direction between the kernel and
69473 + userland, even if only a part of the heap object is copied.
69474 +
69475 + Specifically, this checking prevents information leaking from the
69476 + kernel heap during kernel to userland copies (if the kernel heap
69477 + object is otherwise fully initialized) and prevents kernel heap
69478 + overflows during userland to kernel copies.
69479 +
69480 + Note that the current implementation provides the strictest bounds
69481 + checks for the SLUB allocator.
69482 +
69483 + Enabling this option also enables per-slab cache protection against
69484 + data in a given cache being copied into/out of via userland
69485 + accessors. Though the whitelist of regions will be reduced over
69486 + time, it notably protects important data structures like task structs.
69487 +
69488 + If frame pointers are enabled on x86, this option will also restrict
69489 + copies into and out of the kernel stack to local variables within a
69490 + single frame.
69491 +
69492 + Since this has a negligible performance impact, you should enable
69493 + this feature.
69494 +
69495 +endmenu
69496 +
69497 +endmenu
69498 +
69499 config KEYS
69500 bool "Enable access key retention support"
69501 help
69502 @@ -167,7 +715,7 @@ config INTEL_TXT
69503 config LSM_MMAP_MIN_ADDR
69504 int "Low address space for LSM to protect from user allocation"
69505 depends on SECURITY && SECURITY_SELINUX
69506 - default 65536
69507 + default 32768
69508 help
69509 This is the portion of low virtual memory which should be protected
69510 from userspace allocation. Keeping a user from writing to low pages
69511 diff -urNp linux-2.6.39.4/security/keys/keyring.c linux-2.6.39.4/security/keys/keyring.c
69512 --- linux-2.6.39.4/security/keys/keyring.c 2011-05-19 00:06:34.000000000 -0400
69513 +++ linux-2.6.39.4/security/keys/keyring.c 2011-08-05 19:44:37.000000000 -0400
69514 @@ -213,15 +213,15 @@ static long keyring_read(const struct ke
69515 ret = -EFAULT;
69516
69517 for (loop = 0; loop < klist->nkeys; loop++) {
69518 + key_serial_t serial;
69519 key = klist->keys[loop];
69520 + serial = key->serial;
69521
69522 tmp = sizeof(key_serial_t);
69523 if (tmp > buflen)
69524 tmp = buflen;
69525
69526 - if (copy_to_user(buffer,
69527 - &key->serial,
69528 - tmp) != 0)
69529 + if (copy_to_user(buffer, &serial, tmp))
69530 goto error;
69531
69532 buflen -= tmp;
69533 diff -urNp linux-2.6.39.4/security/min_addr.c linux-2.6.39.4/security/min_addr.c
69534 --- linux-2.6.39.4/security/min_addr.c 2011-05-19 00:06:34.000000000 -0400
69535 +++ linux-2.6.39.4/security/min_addr.c 2011-08-05 19:44:37.000000000 -0400
69536 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
69537 */
69538 static void update_mmap_min_addr(void)
69539 {
69540 +#ifndef SPARC
69541 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
69542 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
69543 mmap_min_addr = dac_mmap_min_addr;
69544 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
69545 #else
69546 mmap_min_addr = dac_mmap_min_addr;
69547 #endif
69548 +#endif
69549 }
69550
69551 /*
69552 diff -urNp linux-2.6.39.4/security/security.c linux-2.6.39.4/security/security.c
69553 --- linux-2.6.39.4/security/security.c 2011-05-19 00:06:34.000000000 -0400
69554 +++ linux-2.6.39.4/security/security.c 2011-08-05 19:44:37.000000000 -0400
69555 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
69556 /* things that live in capability.c */
69557 extern void __init security_fixup_ops(struct security_operations *ops);
69558
69559 -static struct security_operations *security_ops;
69560 -static struct security_operations default_security_ops = {
69561 +static struct security_operations *security_ops __read_only;
69562 +static struct security_operations default_security_ops __read_only = {
69563 .name = "default",
69564 };
69565
69566 @@ -67,7 +67,9 @@ int __init security_init(void)
69567
69568 void reset_security_ops(void)
69569 {
69570 + pax_open_kernel();
69571 security_ops = &default_security_ops;
69572 + pax_close_kernel();
69573 }
69574
69575 /* Save user chosen LSM */
69576 diff -urNp linux-2.6.39.4/security/selinux/hooks.c linux-2.6.39.4/security/selinux/hooks.c
69577 --- linux-2.6.39.4/security/selinux/hooks.c 2011-05-19 00:06:34.000000000 -0400
69578 +++ linux-2.6.39.4/security/selinux/hooks.c 2011-08-05 19:44:37.000000000 -0400
69579 @@ -93,7 +93,6 @@
69580 #define NUM_SEL_MNT_OPTS 5
69581
69582 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
69583 -extern struct security_operations *security_ops;
69584
69585 /* SECMARK reference count */
69586 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
69587 @@ -5431,7 +5430,7 @@ static int selinux_key_getsecurity(struc
69588
69589 #endif
69590
69591 -static struct security_operations selinux_ops = {
69592 +static struct security_operations selinux_ops __read_only = {
69593 .name = "selinux",
69594
69595 .ptrace_access_check = selinux_ptrace_access_check,
69596 diff -urNp linux-2.6.39.4/security/selinux/include/xfrm.h linux-2.6.39.4/security/selinux/include/xfrm.h
69597 --- linux-2.6.39.4/security/selinux/include/xfrm.h 2011-05-19 00:06:34.000000000 -0400
69598 +++ linux-2.6.39.4/security/selinux/include/xfrm.h 2011-08-05 19:44:37.000000000 -0400
69599 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
69600
69601 static inline void selinux_xfrm_notify_policyload(void)
69602 {
69603 - atomic_inc(&flow_cache_genid);
69604 + atomic_inc_unchecked(&flow_cache_genid);
69605 }
69606 #else
69607 static inline int selinux_xfrm_enabled(void)
69608 diff -urNp linux-2.6.39.4/security/selinux/ss/services.c linux-2.6.39.4/security/selinux/ss/services.c
69609 --- linux-2.6.39.4/security/selinux/ss/services.c 2011-05-19 00:06:34.000000000 -0400
69610 +++ linux-2.6.39.4/security/selinux/ss/services.c 2011-08-05 19:44:37.000000000 -0400
69611 @@ -1806,6 +1806,8 @@ int security_load_policy(void *data, siz
69612 int rc = 0;
69613 struct policy_file file = { data, len }, *fp = &file;
69614
69615 + pax_track_stack();
69616 +
69617 if (!ss_initialized) {
69618 avtab_cache_init();
69619 rc = policydb_read(&policydb, fp);
69620 diff -urNp linux-2.6.39.4/security/smack/smack_lsm.c linux-2.6.39.4/security/smack/smack_lsm.c
69621 --- linux-2.6.39.4/security/smack/smack_lsm.c 2011-05-19 00:06:34.000000000 -0400
69622 +++ linux-2.6.39.4/security/smack/smack_lsm.c 2011-08-05 19:44:37.000000000 -0400
69623 @@ -3386,7 +3386,7 @@ static int smack_inode_getsecctx(struct
69624 return 0;
69625 }
69626
69627 -struct security_operations smack_ops = {
69628 +struct security_operations smack_ops __read_only = {
69629 .name = "smack",
69630
69631 .ptrace_access_check = smack_ptrace_access_check,
69632 diff -urNp linux-2.6.39.4/security/tomoyo/tomoyo.c linux-2.6.39.4/security/tomoyo/tomoyo.c
69633 --- linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-05-19 00:06:34.000000000 -0400
69634 +++ linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-08-05 19:44:37.000000000 -0400
69635 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
69636 * tomoyo_security_ops is a "struct security_operations" which is used for
69637 * registering TOMOYO.
69638 */
69639 -static struct security_operations tomoyo_security_ops = {
69640 +static struct security_operations tomoyo_security_ops __read_only = {
69641 .name = "tomoyo",
69642 .cred_alloc_blank = tomoyo_cred_alloc_blank,
69643 .cred_prepare = tomoyo_cred_prepare,
69644 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.c linux-2.6.39.4/sound/aoa/codecs/onyx.c
69645 --- linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-05-19 00:06:34.000000000 -0400
69646 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-08-05 19:44:37.000000000 -0400
69647 @@ -54,7 +54,7 @@ struct onyx {
69648 spdif_locked:1,
69649 analog_locked:1,
69650 original_mute:2;
69651 - int open_count;
69652 + local_t open_count;
69653 struct codec_info *codec_info;
69654
69655 /* mutex serializes concurrent access to the device
69656 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
69657 struct onyx *onyx = cii->codec_data;
69658
69659 mutex_lock(&onyx->mutex);
69660 - onyx->open_count++;
69661 + local_inc(&onyx->open_count);
69662 mutex_unlock(&onyx->mutex);
69663
69664 return 0;
69665 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
69666 struct onyx *onyx = cii->codec_data;
69667
69668 mutex_lock(&onyx->mutex);
69669 - onyx->open_count--;
69670 - if (!onyx->open_count)
69671 + if (local_dec_and_test(&onyx->open_count))
69672 onyx->spdif_locked = onyx->analog_locked = 0;
69673 mutex_unlock(&onyx->mutex);
69674
69675 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.h linux-2.6.39.4/sound/aoa/codecs/onyx.h
69676 --- linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-05-19 00:06:34.000000000 -0400
69677 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-08-05 19:44:37.000000000 -0400
69678 @@ -11,6 +11,7 @@
69679 #include <linux/i2c.h>
69680 #include <asm/pmac_low_i2c.h>
69681 #include <asm/prom.h>
69682 +#include <asm/local.h>
69683
69684 /* PCM3052 register definitions */
69685
69686 diff -urNp linux-2.6.39.4/sound/core/seq/seq_device.c linux-2.6.39.4/sound/core/seq/seq_device.c
69687 --- linux-2.6.39.4/sound/core/seq/seq_device.c 2011-05-19 00:06:34.000000000 -0400
69688 +++ linux-2.6.39.4/sound/core/seq/seq_device.c 2011-08-05 20:34:06.000000000 -0400
69689 @@ -63,7 +63,7 @@ struct ops_list {
69690 int argsize; /* argument size */
69691
69692 /* operators */
69693 - struct snd_seq_dev_ops ops;
69694 + struct snd_seq_dev_ops *ops;
69695
69696 /* registred devices */
69697 struct list_head dev_list; /* list of devices */
69698 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
69699
69700 mutex_lock(&ops->reg_mutex);
69701 /* copy driver operators */
69702 - ops->ops = *entry;
69703 + ops->ops = entry;
69704 ops->driver |= DRIVER_LOADED;
69705 ops->argsize = argsize;
69706
69707 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
69708 dev->name, ops->id, ops->argsize, dev->argsize);
69709 return -EINVAL;
69710 }
69711 - if (ops->ops.init_device(dev) >= 0) {
69712 + if (ops->ops->init_device(dev) >= 0) {
69713 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
69714 ops->num_init_devices++;
69715 } else {
69716 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
69717 dev->name, ops->id, ops->argsize, dev->argsize);
69718 return -EINVAL;
69719 }
69720 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
69721 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
69722 dev->status = SNDRV_SEQ_DEVICE_FREE;
69723 dev->driver_data = NULL;
69724 ops->num_init_devices--;
69725 diff -urNp linux-2.6.39.4/sound/drivers/mts64.c linux-2.6.39.4/sound/drivers/mts64.c
69726 --- linux-2.6.39.4/sound/drivers/mts64.c 2011-05-19 00:06:34.000000000 -0400
69727 +++ linux-2.6.39.4/sound/drivers/mts64.c 2011-08-05 20:34:06.000000000 -0400
69728 @@ -28,6 +28,7 @@
69729 #include <sound/initval.h>
69730 #include <sound/rawmidi.h>
69731 #include <sound/control.h>
69732 +#include <asm/local.h>
69733
69734 #define CARD_NAME "Miditerminal 4140"
69735 #define DRIVER_NAME "MTS64"
69736 @@ -66,7 +67,7 @@ struct mts64 {
69737 struct pardevice *pardev;
69738 int pardev_claimed;
69739
69740 - int open_count;
69741 + local_t open_count;
69742 int current_midi_output_port;
69743 int current_midi_input_port;
69744 u8 mode[MTS64_NUM_INPUT_PORTS];
69745 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
69746 {
69747 struct mts64 *mts = substream->rmidi->private_data;
69748
69749 - if (mts->open_count == 0) {
69750 + if (local_read(&mts->open_count) == 0) {
69751 /* We don't need a spinlock here, because this is just called
69752 if the device has not been opened before.
69753 So there aren't any IRQs from the device */
69754 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69755
69756 msleep(50);
69757 }
69758 - ++(mts->open_count);
69759 + local_inc(&mts->open_count);
69760
69761 return 0;
69762 }
69763 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69764 struct mts64 *mts = substream->rmidi->private_data;
69765 unsigned long flags;
69766
69767 - --(mts->open_count);
69768 - if (mts->open_count == 0) {
69769 + if (local_dec_return(&mts->open_count) == 0) {
69770 /* We need the spinlock_irqsave here because we can still
69771 have IRQs at this point */
69772 spin_lock_irqsave(&mts->lock, flags);
69773 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69774
69775 msleep(500);
69776
69777 - } else if (mts->open_count < 0)
69778 - mts->open_count = 0;
69779 + } else if (local_read(&mts->open_count) < 0)
69780 + local_set(&mts->open_count, 0);
69781
69782 return 0;
69783 }
69784 diff -urNp linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c
69785 --- linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-05-19 00:06:34.000000000 -0400
69786 +++ linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:34:06.000000000 -0400
69787 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69788 MODULE_DESCRIPTION("OPL4 driver");
69789 MODULE_LICENSE("GPL");
69790
69791 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69792 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69793 {
69794 int timeout = 10;
69795 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69796 diff -urNp linux-2.6.39.4/sound/drivers/portman2x4.c linux-2.6.39.4/sound/drivers/portman2x4.c
69797 --- linux-2.6.39.4/sound/drivers/portman2x4.c 2011-05-19 00:06:34.000000000 -0400
69798 +++ linux-2.6.39.4/sound/drivers/portman2x4.c 2011-08-05 20:34:06.000000000 -0400
69799 @@ -47,6 +47,7 @@
69800 #include <sound/initval.h>
69801 #include <sound/rawmidi.h>
69802 #include <sound/control.h>
69803 +#include <asm/local.h>
69804
69805 #define CARD_NAME "Portman 2x4"
69806 #define DRIVER_NAME "portman"
69807 @@ -84,7 +85,7 @@ struct portman {
69808 struct pardevice *pardev;
69809 int pardev_claimed;
69810
69811 - int open_count;
69812 + local_t open_count;
69813 int mode[PORTMAN_NUM_INPUT_PORTS];
69814 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69815 };
69816 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.c linux-2.6.39.4/sound/firewire/amdtp.c
69817 --- linux-2.6.39.4/sound/firewire/amdtp.c 2011-05-19 00:06:34.000000000 -0400
69818 +++ linux-2.6.39.4/sound/firewire/amdtp.c 2011-08-05 19:44:37.000000000 -0400
69819 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69820 ptr = s->pcm_buffer_pointer + data_blocks;
69821 if (ptr >= pcm->runtime->buffer_size)
69822 ptr -= pcm->runtime->buffer_size;
69823 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69824 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69825
69826 s->pcm_period_pointer += data_blocks;
69827 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69828 @@ -510,7 +510,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69829 */
69830 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69831 {
69832 - ACCESS_ONCE(s->source_node_id_field) =
69833 + ACCESS_ONCE_RW(s->source_node_id_field) =
69834 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69835 }
69836 EXPORT_SYMBOL(amdtp_out_stream_update);
69837 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.h linux-2.6.39.4/sound/firewire/amdtp.h
69838 --- linux-2.6.39.4/sound/firewire/amdtp.h 2011-05-19 00:06:34.000000000 -0400
69839 +++ linux-2.6.39.4/sound/firewire/amdtp.h 2011-08-05 19:44:37.000000000 -0400
69840 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69841 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69842 struct snd_pcm_substream *pcm)
69843 {
69844 - ACCESS_ONCE(s->pcm) = pcm;
69845 + ACCESS_ONCE_RW(s->pcm) = pcm;
69846 }
69847
69848 /**
69849 diff -urNp linux-2.6.39.4/sound/isa/cmi8330.c linux-2.6.39.4/sound/isa/cmi8330.c
69850 --- linux-2.6.39.4/sound/isa/cmi8330.c 2011-05-19 00:06:34.000000000 -0400
69851 +++ linux-2.6.39.4/sound/isa/cmi8330.c 2011-08-05 20:34:06.000000000 -0400
69852 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69853
69854 struct snd_pcm *pcm;
69855 struct snd_cmi8330_stream {
69856 - struct snd_pcm_ops ops;
69857 + snd_pcm_ops_no_const ops;
69858 snd_pcm_open_callback_t open;
69859 void *private_data; /* sb or wss */
69860 } streams[2];
69861 diff -urNp linux-2.6.39.4/sound/oss/sb_audio.c linux-2.6.39.4/sound/oss/sb_audio.c
69862 --- linux-2.6.39.4/sound/oss/sb_audio.c 2011-05-19 00:06:34.000000000 -0400
69863 +++ linux-2.6.39.4/sound/oss/sb_audio.c 2011-08-05 19:44:37.000000000 -0400
69864 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69865 buf16 = (signed short *)(localbuf + localoffs);
69866 while (c)
69867 {
69868 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69869 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69870 if (copy_from_user(lbuf8,
69871 userbuf+useroffs + p,
69872 locallen))
69873 diff -urNp linux-2.6.39.4/sound/oss/swarm_cs4297a.c linux-2.6.39.4/sound/oss/swarm_cs4297a.c
69874 --- linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-05-19 00:06:34.000000000 -0400
69875 +++ linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-08-05 19:44:37.000000000 -0400
69876 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69877 {
69878 struct cs4297a_state *s;
69879 u32 pwr, id;
69880 - mm_segment_t fs;
69881 int rval;
69882 #ifndef CONFIG_BCM_CS4297A_CSWARM
69883 u64 cfg;
69884 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69885 if (!rval) {
69886 char *sb1250_duart_present;
69887
69888 +#if 0
69889 + mm_segment_t fs;
69890 fs = get_fs();
69891 set_fs(KERNEL_DS);
69892 -#if 0
69893 val = SOUND_MASK_LINE;
69894 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69895 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69896 val = initvol[i].vol;
69897 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69898 }
69899 + set_fs(fs);
69900 // cs4297a_write_ac97(s, 0x18, 0x0808);
69901 #else
69902 // cs4297a_write_ac97(s, 0x5e, 0x180);
69903 cs4297a_write_ac97(s, 0x02, 0x0808);
69904 cs4297a_write_ac97(s, 0x18, 0x0808);
69905 #endif
69906 - set_fs(fs);
69907
69908 list_add(&s->list, &cs4297a_devs);
69909
69910 diff -urNp linux-2.6.39.4/sound/pci/hda/hda_codec.h linux-2.6.39.4/sound/pci/hda/hda_codec.h
69911 --- linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-05-19 00:06:34.000000000 -0400
69912 +++ linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-08-05 20:34:06.000000000 -0400
69913 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69914 /* notify power-up/down from codec to controller */
69915 void (*pm_notify)(struct hda_bus *bus);
69916 #endif
69917 -};
69918 +} __no_const;
69919
69920 /* template to pass to the bus constructor */
69921 struct hda_bus_template {
69922 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69923 #endif
69924 void (*reboot_notify)(struct hda_codec *codec);
69925 };
69926 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69927
69928 /* record for amp information cache */
69929 struct hda_cache_head {
69930 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69931 struct snd_pcm_substream *substream);
69932 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69933 struct snd_pcm_substream *substream);
69934 -};
69935 +} __no_const;
69936
69937 /* PCM information for each substream */
69938 struct hda_pcm_stream {
69939 @@ -801,7 +802,7 @@ struct hda_codec {
69940 const char *modelname; /* model name for preset */
69941
69942 /* set by patch */
69943 - struct hda_codec_ops patch_ops;
69944 + hda_codec_ops_no_const patch_ops;
69945
69946 /* PCM to create, set by patch_ops.build_pcms callback */
69947 unsigned int num_pcms;
69948 diff -urNp linux-2.6.39.4/sound/pci/ice1712/ice1712.h linux-2.6.39.4/sound/pci/ice1712/ice1712.h
69949 --- linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-05-19 00:06:34.000000000 -0400
69950 +++ linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-08-05 20:34:06.000000000 -0400
69951 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69952 unsigned int mask_flags; /* total mask bits */
69953 struct snd_akm4xxx_ops {
69954 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69955 - } ops;
69956 + } __no_const ops;
69957 };
69958
69959 struct snd_ice1712_spdif {
69960 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69961 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69962 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69963 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69964 - } ops;
69965 + } __no_const ops;
69966 };
69967
69968
69969 diff -urNp linux-2.6.39.4/sound/pci/intel8x0m.c linux-2.6.39.4/sound/pci/intel8x0m.c
69970 --- linux-2.6.39.4/sound/pci/intel8x0m.c 2011-05-19 00:06:34.000000000 -0400
69971 +++ linux-2.6.39.4/sound/pci/intel8x0m.c 2011-08-05 20:34:06.000000000 -0400
69972 @@ -1265,7 +1265,7 @@ static struct shortname_table {
69973 { 0x5455, "ALi M5455" },
69974 { 0x746d, "AMD AMD8111" },
69975 #endif
69976 - { 0 },
69977 + { 0, },
69978 };
69979
69980 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
69981 diff -urNp linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c
69982 --- linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-05-19 00:06:34.000000000 -0400
69983 +++ linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-08-05 20:34:06.000000000 -0400
69984 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69985 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69986 break;
69987 }
69988 - if (atomic_read(&chip->interrupt_sleep_count)) {
69989 - atomic_set(&chip->interrupt_sleep_count, 0);
69990 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69991 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69992 wake_up(&chip->interrupt_sleep);
69993 }
69994 __end:
69995 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69996 continue;
69997 init_waitqueue_entry(&wait, current);
69998 add_wait_queue(&chip->interrupt_sleep, &wait);
69999 - atomic_inc(&chip->interrupt_sleep_count);
70000 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
70001 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
70002 remove_wait_queue(&chip->interrupt_sleep, &wait);
70003 }
70004 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
70005 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
70006 spin_unlock(&chip->reg_lock);
70007
70008 - if (atomic_read(&chip->interrupt_sleep_count)) {
70009 - atomic_set(&chip->interrupt_sleep_count, 0);
70010 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
70011 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
70012 wake_up(&chip->interrupt_sleep);
70013 }
70014 }
70015 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
70016 spin_lock_init(&chip->reg_lock);
70017 spin_lock_init(&chip->voice_lock);
70018 init_waitqueue_head(&chip->interrupt_sleep);
70019 - atomic_set(&chip->interrupt_sleep_count, 0);
70020 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
70021 chip->card = card;
70022 chip->pci = pci;
70023 chip->irq = -1;
70024 diff -urNp linux-2.6.39.4/sound/soc/soc-core.c linux-2.6.39.4/sound/soc/soc-core.c
70025 --- linux-2.6.39.4/sound/soc/soc-core.c 2011-05-19 00:06:34.000000000 -0400
70026 +++ linux-2.6.39.4/sound/soc/soc-core.c 2011-08-05 20:34:06.000000000 -0400
70027 @@ -1027,7 +1027,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
70028 }
70029
70030 /* ASoC PCM operations */
70031 -static struct snd_pcm_ops soc_pcm_ops = {
70032 +static snd_pcm_ops_no_const soc_pcm_ops = {
70033 .open = soc_pcm_open,
70034 .close = soc_codec_close,
70035 .hw_params = soc_pcm_hw_params,
70036 @@ -2105,6 +2105,7 @@ static int soc_new_pcm(struct snd_soc_pc
70037
70038 rtd->pcm = pcm;
70039 pcm->private_data = rtd;
70040 + /* this whole logic is broken... */
70041 soc_pcm_ops.mmap = platform->driver->ops->mmap;
70042 soc_pcm_ops.pointer = platform->driver->ops->pointer;
70043 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
70044 diff -urNp linux-2.6.39.4/sound/usb/card.h linux-2.6.39.4/sound/usb/card.h
70045 --- linux-2.6.39.4/sound/usb/card.h 2011-05-19 00:06:34.000000000 -0400
70046 +++ linux-2.6.39.4/sound/usb/card.h 2011-08-05 20:34:06.000000000 -0400
70047 @@ -44,6 +44,7 @@ struct snd_urb_ops {
70048 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
70049 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
70050 };
70051 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
70052
70053 struct snd_usb_substream {
70054 struct snd_usb_stream *stream;
70055 @@ -93,7 +94,7 @@ struct snd_usb_substream {
70056 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
70057 spinlock_t lock;
70058
70059 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
70060 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
70061 };
70062
70063 struct snd_usb_stream {
70064 diff -urNp linux-2.6.39.4/tools/gcc/constify_plugin.c linux-2.6.39.4/tools/gcc/constify_plugin.c
70065 --- linux-2.6.39.4/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
70066 +++ linux-2.6.39.4/tools/gcc/constify_plugin.c 2011-08-05 20:34:06.000000000 -0400
70067 @@ -0,0 +1,189 @@
70068 +/*
70069 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
70070 + * Licensed under the GPL v2, or (at your option) v3
70071 + *
70072 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
70073 + *
70074 + * Usage:
70075 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
70076 + * $ gcc -fplugin=constify_plugin.so test.c -O2
70077 + */
70078 +
70079 +#include "gcc-plugin.h"
70080 +#include "config.h"
70081 +#include "system.h"
70082 +#include "coretypes.h"
70083 +#include "tree.h"
70084 +#include "tree-pass.h"
70085 +#include "intl.h"
70086 +#include "plugin-version.h"
70087 +#include "tm.h"
70088 +#include "toplev.h"
70089 +#include "function.h"
70090 +#include "tree-flow.h"
70091 +#include "plugin.h"
70092 +
70093 +int plugin_is_GPL_compatible;
70094 +
70095 +static struct plugin_info const_plugin_info = {
70096 + .version = "20110721",
70097 + .help = "no-constify\tturn off constification\n",
70098 +};
70099 +
70100 +static bool walk_struct(tree node);
70101 +
70102 +static void deconstify_node(tree node)
70103 +{
70104 + tree field;
70105 +
70106 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70107 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70108 + if (code == RECORD_TYPE || code == UNION_TYPE)
70109 + deconstify_node(TREE_TYPE(field));
70110 + TREE_READONLY(field) = 0;
70111 + TREE_READONLY(TREE_TYPE(field)) = 0;
70112 + }
70113 +}
70114 +
70115 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
70116 +{
70117 + if (TREE_CODE(*node) == FUNCTION_DECL) {
70118 + error("%qE attribute does not apply to functions", name);
70119 + *no_add_attrs = true;
70120 + return NULL_TREE;
70121 + }
70122 +
70123 + if (DECL_P(*node) && lookup_attribute("no_const", TYPE_ATTRIBUTES(TREE_TYPE(*node)))) {
70124 + error("%qE attribute is already applied to the type" , name);
70125 + *no_add_attrs = true;
70126 + return NULL_TREE;
70127 + }
70128 +
70129 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(TREE_TYPE(*node))) {
70130 + error("%qE attribute used on type that is not constified" , name);
70131 + *no_add_attrs = true;
70132 + return NULL_TREE;
70133 + }
70134 +
70135 + if (TREE_CODE(*node) == TYPE_DECL) {
70136 + tree chain = TREE_CHAIN(TREE_TYPE(*node));
70137 + TREE_TYPE(*node) = copy_node(TREE_TYPE(*node));
70138 + TREE_CHAIN(TREE_TYPE(*node)) = copy_list(chain);
70139 + TREE_READONLY(TREE_TYPE(*node)) = 0;
70140 + deconstify_node(TREE_TYPE(*node));
70141 + return NULL_TREE;
70142 + }
70143 +
70144 + return NULL_TREE;
70145 +}
70146 +
70147 +static struct attribute_spec no_const_attr = {
70148 + .name = "no_const",
70149 + .min_length = 0,
70150 + .max_length = 0,
70151 + .decl_required = false,
70152 + .type_required = false,
70153 + .function_type_required = false,
70154 + .handler = handle_no_const_attribute
70155 +};
70156 +
70157 +static void register_attributes(void *event_data, void *data)
70158 +{
70159 + register_attribute(&no_const_attr);
70160 +}
70161 +
70162 +/*
70163 +static void printnode(char *prefix, tree node)
70164 +{
70165 + enum tree_code code;
70166 + enum tree_code_class tclass;
70167 +
70168 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
70169 +
70170 + code = TREE_CODE(node);
70171 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
70172 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
70173 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
70174 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
70175 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
70176 +}
70177 +*/
70178 +
70179 +static void constify_node(tree node)
70180 +{
70181 + TREE_READONLY(node) = 1;
70182 +}
70183 +
70184 +static bool is_fptr(tree field)
70185 +{
70186 + tree ptr = TREE_TYPE(field);
70187 +
70188 + if (TREE_CODE(ptr) != POINTER_TYPE)
70189 + return false;
70190 +
70191 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
70192 +}
70193 +
70194 +static bool walk_struct(tree node)
70195 +{
70196 + tree field;
70197 +
70198 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70199 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70200 + if (code == RECORD_TYPE || code == UNION_TYPE) {
70201 + if (!(walk_struct(TREE_TYPE(field))))
70202 + return false;
70203 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
70204 + return false;
70205 + }
70206 + return true;
70207 +}
70208 +
70209 +static void finish_type(void *event_data, void *data)
70210 +{
70211 + tree node = (tree)event_data;
70212 +
70213 + if (node == NULL_TREE)
70214 + return;
70215 +
70216 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
70217 + return;
70218 +
70219 + if (TREE_READONLY(node))
70220 + return;
70221 +
70222 + if (TYPE_FIELDS(node) == NULL_TREE)
70223 + return;
70224 +
70225 + if (walk_struct(node))
70226 + constify_node(node);
70227 +}
70228 +
70229 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70230 +{
70231 + const char * const plugin_name = plugin_info->base_name;
70232 + const int argc = plugin_info->argc;
70233 + const struct plugin_argument * const argv = plugin_info->argv;
70234 + int i;
70235 + bool constify = true;
70236 +
70237 + if (!plugin_default_version_check(version, &gcc_version)) {
70238 + error(G_("incompatible gcc/plugin versions"));
70239 + return 1;
70240 + }
70241 +
70242 + for (i = 0; i < argc; ++i) {
70243 + if (!(strcmp(argv[i].key, "no-constify"))) {
70244 + constify = false;
70245 + continue;
70246 + }
70247 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70248 + }
70249 +
70250 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
70251 + if (constify)
70252 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
70253 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
70254 +
70255 + return 0;
70256 +}
70257 diff -urNp linux-2.6.39.4/tools/gcc/Makefile linux-2.6.39.4/tools/gcc/Makefile
70258 --- linux-2.6.39.4/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
70259 +++ linux-2.6.39.4/tools/gcc/Makefile 2011-08-05 20:34:06.000000000 -0400
70260 @@ -0,0 +1,12 @@
70261 +#CC := gcc
70262 +#PLUGIN_SOURCE_FILES := pax_plugin.c
70263 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
70264 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
70265 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
70266 +
70267 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
70268 +
70269 +hostlibs-y := stackleak_plugin.so constify_plugin.so
70270 +always := $(hostlibs-y)
70271 +stackleak_plugin-objs := stackleak_plugin.o
70272 +constify_plugin-objs := constify_plugin.o
70273 diff -urNp linux-2.6.39.4/tools/gcc/stackleak_plugin.c linux-2.6.39.4/tools/gcc/stackleak_plugin.c
70274 --- linux-2.6.39.4/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
70275 +++ linux-2.6.39.4/tools/gcc/stackleak_plugin.c 2011-08-05 20:34:06.000000000 -0400
70276 @@ -0,0 +1,243 @@
70277 +/*
70278 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
70279 + * Licensed under the GPL v2
70280 + *
70281 + * Note: the choice of the license means that the compilation process is
70282 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
70283 + * but for the kernel it doesn't matter since it doesn't link against
70284 + * any of the gcc libraries
70285 + *
70286 + * gcc plugin to help implement various PaX features
70287 + *
70288 + * - track lowest stack pointer
70289 + *
70290 + * TODO:
70291 + * - initialize all local variables
70292 + *
70293 + * BUGS:
70294 + * - cloned functions are instrumented twice
70295 + */
70296 +#include "gcc-plugin.h"
70297 +#include "plugin-version.h"
70298 +#include "config.h"
70299 +#include "system.h"
70300 +#include "coretypes.h"
70301 +#include "tm.h"
70302 +#include "toplev.h"
70303 +#include "basic-block.h"
70304 +#include "gimple.h"
70305 +//#include "expr.h" where are you...
70306 +#include "diagnostic.h"
70307 +#include "rtl.h"
70308 +#include "emit-rtl.h"
70309 +#include "function.h"
70310 +#include "tree.h"
70311 +#include "tree-pass.h"
70312 +#include "intl.h"
70313 +
70314 +int plugin_is_GPL_compatible;
70315 +
70316 +static int track_frame_size = -1;
70317 +static const char track_function[] = "pax_track_stack";
70318 +static bool init_locals;
70319 +
70320 +static struct plugin_info stackleak_plugin_info = {
70321 + .version = "201106030000",
70322 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
70323 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
70324 +};
70325 +
70326 +static bool gate_stackleak_track_stack(void);
70327 +static unsigned int execute_stackleak_tree_instrument(void);
70328 +static unsigned int execute_stackleak_final(void);
70329 +
70330 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
70331 + .pass = {
70332 + .type = GIMPLE_PASS,
70333 + .name = "stackleak_tree_instrument",
70334 + .gate = gate_stackleak_track_stack,
70335 + .execute = execute_stackleak_tree_instrument,
70336 + .sub = NULL,
70337 + .next = NULL,
70338 + .static_pass_number = 0,
70339 + .tv_id = TV_NONE,
70340 + .properties_required = PROP_gimple_leh | PROP_cfg,
70341 + .properties_provided = 0,
70342 + .properties_destroyed = 0,
70343 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
70344 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
70345 + }
70346 +};
70347 +
70348 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
70349 + .pass = {
70350 + .type = RTL_PASS,
70351 + .name = "stackleak_final",
70352 + .gate = gate_stackleak_track_stack,
70353 + .execute = execute_stackleak_final,
70354 + .sub = NULL,
70355 + .next = NULL,
70356 + .static_pass_number = 0,
70357 + .tv_id = TV_NONE,
70358 + .properties_required = 0,
70359 + .properties_provided = 0,
70360 + .properties_destroyed = 0,
70361 + .todo_flags_start = 0,
70362 + .todo_flags_finish = 0
70363 + }
70364 +};
70365 +
70366 +static bool gate_stackleak_track_stack(void)
70367 +{
70368 + return track_frame_size >= 0;
70369 +}
70370 +
70371 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
70372 +{
70373 + gimple call;
70374 + tree decl, type;
70375 +
70376 + // insert call to void pax_track_stack(void)
70377 + type = build_function_type_list(void_type_node, NULL_TREE);
70378 + decl = build_fn_decl(track_function, type);
70379 + DECL_ASSEMBLER_NAME(decl); // for LTO
70380 + call = gimple_build_call(decl, 0);
70381 + if (before)
70382 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
70383 + else
70384 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
70385 +}
70386 +
70387 +static unsigned int execute_stackleak_tree_instrument(void)
70388 +{
70389 + basic_block bb;
70390 + gimple_stmt_iterator gsi;
70391 +
70392 + // 1. loop through BBs and GIMPLE statements
70393 + FOR_EACH_BB(bb) {
70394 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
70395 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
70396 + tree decl;
70397 + gimple stmt = gsi_stmt(gsi);
70398 +
70399 + if (!is_gimple_call(stmt))
70400 + continue;
70401 + decl = gimple_call_fndecl(stmt);
70402 + if (!decl)
70403 + continue;
70404 + if (TREE_CODE(decl) != FUNCTION_DECL)
70405 + continue;
70406 + if (!DECL_BUILT_IN(decl))
70407 + continue;
70408 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
70409 + continue;
70410 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
70411 + continue;
70412 +
70413 + // 2. insert track call after each __builtin_alloca call
70414 + stackleak_add_instrumentation(&gsi, false);
70415 +// print_node(stderr, "pax", decl, 4);
70416 + }
70417 + }
70418 +
70419 + // 3. insert track call at the beginning
70420 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
70421 + gsi = gsi_start_bb(bb);
70422 + stackleak_add_instrumentation(&gsi, true);
70423 +
70424 + return 0;
70425 +}
70426 +
70427 +static unsigned int execute_stackleak_final(void)
70428 +{
70429 + rtx insn;
70430 +
70431 + if (cfun->calls_alloca)
70432 + return 0;
70433 +
70434 + // 1. find pax_track_stack calls
70435 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
70436 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
70437 + rtx body;
70438 +
70439 + if (!CALL_P(insn))
70440 + continue;
70441 + body = PATTERN(insn);
70442 + if (GET_CODE(body) != CALL)
70443 + continue;
70444 + body = XEXP(body, 0);
70445 + if (GET_CODE(body) != MEM)
70446 + continue;
70447 + body = XEXP(body, 0);
70448 + if (GET_CODE(body) != SYMBOL_REF)
70449 + continue;
70450 + if (strcmp(XSTR(body, 0), track_function))
70451 + continue;
70452 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70453 + // 2. delete call if function frame is not big enough
70454 + if (get_frame_size() >= track_frame_size)
70455 + continue;
70456 + delete_insn_and_edges(insn);
70457 + }
70458 +
70459 +// print_simple_rtl(stderr, get_insns());
70460 +// print_rtl(stderr, get_insns());
70461 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70462 +
70463 + return 0;
70464 +}
70465 +
70466 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70467 +{
70468 + const char * const plugin_name = plugin_info->base_name;
70469 + const int argc = plugin_info->argc;
70470 + const struct plugin_argument * const argv = plugin_info->argv;
70471 + int i;
70472 + struct register_pass_info stackleak_tree_instrument_pass_info = {
70473 + .pass = &stackleak_tree_instrument_pass.pass,
70474 +// .reference_pass_name = "tree_profile",
70475 + .reference_pass_name = "optimized",
70476 + .ref_pass_instance_number = 0,
70477 + .pos_op = PASS_POS_INSERT_AFTER
70478 + };
70479 + struct register_pass_info stackleak_final_pass_info = {
70480 + .pass = &stackleak_final_rtl_opt_pass.pass,
70481 + .reference_pass_name = "final",
70482 + .ref_pass_instance_number = 0,
70483 + .pos_op = PASS_POS_INSERT_BEFORE
70484 + };
70485 +
70486 + if (!plugin_default_version_check(version, &gcc_version)) {
70487 + error(G_("incompatible gcc/plugin versions"));
70488 + return 1;
70489 + }
70490 +
70491 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
70492 +
70493 + for (i = 0; i < argc; ++i) {
70494 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
70495 + if (!argv[i].value) {
70496 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70497 + continue;
70498 + }
70499 + track_frame_size = atoi(argv[i].value);
70500 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
70501 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70502 + continue;
70503 + }
70504 + if (!strcmp(argv[i].key, "initialize-locals")) {
70505 + if (argv[i].value) {
70506 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70507 + continue;
70508 + }
70509 + init_locals = true;
70510 + continue;
70511 + }
70512 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70513 + }
70514 +
70515 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
70516 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
70517 +
70518 + return 0;
70519 +}
70520 diff -urNp linux-2.6.39.4/usr/gen_init_cpio.c linux-2.6.39.4/usr/gen_init_cpio.c
70521 --- linux-2.6.39.4/usr/gen_init_cpio.c 2011-05-19 00:06:34.000000000 -0400
70522 +++ linux-2.6.39.4/usr/gen_init_cpio.c 2011-08-05 19:44:38.000000000 -0400
70523 @@ -305,7 +305,7 @@ static int cpio_mkfile(const char *name,
70524 int retval;
70525 int rc = -1;
70526 int namesize;
70527 - int i;
70528 + unsigned int i;
70529
70530 mode |= S_IFREG;
70531
70532 @@ -394,9 +394,10 @@ static char *cpio_replace_env(char *new_
70533 *env_var = *expanded = '\0';
70534 strncat(env_var, start + 2, end - start - 2);
70535 strncat(expanded, new_location, start - new_location);
70536 - strncat(expanded, getenv(env_var), PATH_MAX);
70537 - strncat(expanded, end + 1, PATH_MAX);
70538 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
70539 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
70540 strncpy(new_location, expanded, PATH_MAX);
70541 + new_location[PATH_MAX] = 0;
70542 } else
70543 break;
70544 }
70545 diff -urNp linux-2.6.39.4/virt/kvm/kvm_main.c linux-2.6.39.4/virt/kvm/kvm_main.c
70546 --- linux-2.6.39.4/virt/kvm/kvm_main.c 2011-05-19 00:06:34.000000000 -0400
70547 +++ linux-2.6.39.4/virt/kvm/kvm_main.c 2011-08-05 20:34:06.000000000 -0400
70548 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
70549
70550 static cpumask_var_t cpus_hardware_enabled;
70551 static int kvm_usage_count = 0;
70552 -static atomic_t hardware_enable_failed;
70553 +static atomic_unchecked_t hardware_enable_failed;
70554
70555 struct kmem_cache *kvm_vcpu_cache;
70556 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
70557 @@ -2187,7 +2187,7 @@ static void hardware_enable_nolock(void
70558
70559 if (r) {
70560 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
70561 - atomic_inc(&hardware_enable_failed);
70562 + atomic_inc_unchecked(&hardware_enable_failed);
70563 printk(KERN_INFO "kvm: enabling virtualization on "
70564 "CPU%d failed\n", cpu);
70565 }
70566 @@ -2241,10 +2241,10 @@ static int hardware_enable_all(void)
70567
70568 kvm_usage_count++;
70569 if (kvm_usage_count == 1) {
70570 - atomic_set(&hardware_enable_failed, 0);
70571 + atomic_set_unchecked(&hardware_enable_failed, 0);
70572 on_each_cpu(hardware_enable_nolock, NULL, 1);
70573
70574 - if (atomic_read(&hardware_enable_failed)) {
70575 + if (atomic_read_unchecked(&hardware_enable_failed)) {
70576 hardware_disable_all_nolock();
70577 r = -EBUSY;
70578 }
70579 @@ -2509,7 +2509,7 @@ static void kvm_sched_out(struct preempt
70580 kvm_arch_vcpu_put(vcpu);
70581 }
70582
70583 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70584 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70585 struct module *module)
70586 {
70587 int r;
70588 @@ -2572,7 +2572,7 @@ int kvm_init(void *opaque, unsigned vcpu
70589 if (!vcpu_align)
70590 vcpu_align = __alignof__(struct kvm_vcpu);
70591 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
70592 - 0, NULL);
70593 + SLAB_USERCOPY, NULL);
70594 if (!kvm_vcpu_cache) {
70595 r = -ENOMEM;
70596 goto out_free_3;
70597 @@ -2582,9 +2582,11 @@ int kvm_init(void *opaque, unsigned vcpu
70598 if (r)
70599 goto out_free;
70600
70601 - kvm_chardev_ops.owner = module;
70602 - kvm_vm_fops.owner = module;
70603 - kvm_vcpu_fops.owner = module;
70604 + pax_open_kernel();
70605 + *(void **)&kvm_chardev_ops.owner = module;
70606 + *(void **)&kvm_vm_fops.owner = module;
70607 + *(void **)&kvm_vcpu_fops.owner = module;
70608 + pax_close_kernel();
70609
70610 r = misc_register(&kvm_dev);
70611 if (r) {