]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-2.6.39.4-201108132040.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.39.4-201108132040.patch
1 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/elf.h linux-2.6.39.4/arch/alpha/include/asm/elf.h
2 --- linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
3 +++ linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/pgtable.h linux-2.6.39.4/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
20 +++ linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.39.4/arch/alpha/kernel/module.c linux-2.6.39.4/arch/alpha/kernel/module.c
40 --- linux-2.6.39.4/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
41 +++ linux-2.6.39.4/arch/alpha/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.39.4/arch/alpha/kernel/osf_sys.c linux-2.6.39.4/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 21:11:51.000000000 -0400
53 +++ linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 19:44:33.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.39.4/arch/alpha/mm/fault.c linux-2.6.39.4/arch/alpha/mm/fault.c
86 --- linux-2.6.39.4/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
87 +++ linux-2.6.39.4/arch/alpha/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.39.4/arch/arm/include/asm/elf.h linux-2.6.39.4/arch/arm/include/asm/elf.h
245 --- linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
246 +++ linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
247 @@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-2.6.39.4/arch/arm/include/asm/kmap_types.h linux-2.6.39.4/arch/arm/include/asm/kmap_types.h
275 --- linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
276 +++ linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-2.6.39.4/arch/arm/include/asm/uaccess.h linux-2.6.39.4/arch/arm/include/asm/uaccess.h
286 --- linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
287 +++ linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-2.6.39.4/arch/arm/kernel/armksyms.c linux-2.6.39.4/arch/arm/kernel/armksyms.c
344 --- linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-05-19 00:06:34.000000000 -0400
345 +++ linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-08-05 19:44:33.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-2.6.39.4/arch/arm/kernel/process.c linux-2.6.39.4/arch/arm/kernel/process.c
358 --- linux-2.6.39.4/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
359 +++ linux-2.6.39.4/arch/arm/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-2.6.39.4/arch/arm/kernel/traps.c linux-2.6.39.4/arch/arm/kernel/traps.c
382 --- linux-2.6.39.4/arch/arm/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
383 +++ linux-2.6.39.4/arch/arm/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
384 @@ -258,6 +258,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -285,6 +287,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_from_user.S linux-2.6.39.4/arch/arm/lib/copy_from_user.S
404 --- linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-05-19 00:06:34.000000000 -0400
405 +++ linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-08-05 19:44:33.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_to_user.S linux-2.6.39.4/arch/arm/lib/copy_to_user.S
430 --- linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-05-19 00:06:34.000000000 -0400
431 +++ linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-08-05 19:44:33.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess.S linux-2.6.39.4/arch/arm/lib/uaccess.S
456 --- linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-05-19 00:06:34.000000000 -0400
457 +++ linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-08-05 19:44:33.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-05-19 00:06:34.000000000 -0400
513 +++ linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-05 19:44:33.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 00:06:34.000000000 -0400
525 +++ linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-05 19:44:33.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-2.6.39.4/arch/arm/mm/fault.c linux-2.6.39.4/arch/arm/mm/fault.c
536 --- linux-2.6.39.4/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
537 +++ linux-2.6.39.4/arch/arm/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-2.6.39.4/arch/arm/mm/mmap.c linux-2.6.39.4/arch/arm/mm/mmap.c
587 --- linux-2.6.39.4/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
588 +++ linux-2.6.39.4/arch/arm/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/elf.h linux-2.6.39.4/arch/avr32/include/asm/elf.h
639 --- linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
640 +++ linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
659 +++ linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-2.6.39.4/arch/avr32/mm/fault.c linux-2.6.39.4/arch/avr32/mm/fault.c
671 --- linux-2.6.39.4/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
672 +++ linux-2.6.39.4/arch/avr32/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-2.6.39.4/arch/frv/include/asm/kmap_types.h linux-2.6.39.4/arch/frv/include/asm/kmap_types.h
715 --- linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
716 +++ linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-2.6.39.4/arch/frv/mm/elf-fdpic.c linux-2.6.39.4/arch/frv/mm/elf-fdpic.c
726 --- linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400
727 +++ linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-08-05 19:44:33.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/elf.h linux-2.6.39.4/arch/ia64/include/asm/elf.h
757 --- linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
758 +++ linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/pgtable.h linux-2.6.39.4/arch/ia64/include/asm/pgtable.h
774 --- linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
775 +++ linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/spinlock.h linux-2.6.39.4/arch/ia64/include/asm/spinlock.h
804 --- linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
805 +++ linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/uaccess.h linux-2.6.39.4/arch/ia64/include/asm/uaccess.h
816 --- linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
817 +++ linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-2.6.39.4/arch/ia64/kernel/module.c linux-2.6.39.4/arch/ia64/kernel/module.c
837 --- linux-2.6.39.4/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
838 +++ linux-2.6.39.4/arch/ia64/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400
929 +++ linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-08-05 19:44:33.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
964 +++ linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-05 19:44:33.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-2.6.39.4/arch/ia64/mm/fault.c linux-2.6.39.4/arch/ia64/mm/fault.c
975 --- linux-2.6.39.4/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
976 +++ linux-2.6.39.4/arch/ia64/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
977 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
1028 +++ linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-2.6.39.4/arch/ia64/mm/init.c linux-2.6.39.4/arch/ia64/mm/init.c
1039 --- linux-2.6.39.4/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400
1040 +++ linux-2.6.39.4/arch/ia64/mm/init.c 2011-08-05 19:44:33.000000000 -0400
1041 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-2.6.39.4/arch/m32r/lib/usercopy.c linux-2.6.39.4/arch/m32r/lib/usercopy.c
1062 --- linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400
1063 +++ linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-08-05 19:44:33.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-2.6.39.4/arch/mips/include/asm/elf.h linux-2.6.39.4/arch/mips/include/asm/elf.h
1085 --- linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1086 +++ linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-2.6.39.4/arch/mips/include/asm/page.h linux-2.6.39.4/arch/mips/include/asm/page.h
1109 --- linux-2.6.39.4/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1110 +++ linux-2.6.39.4/arch/mips/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-2.6.39.4/arch/mips/include/asm/system.h linux-2.6.39.4/arch/mips/include/asm/system.h
1121 --- linux-2.6.39.4/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1122 +++ linux-2.6.39.4/arch/mips/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400
1133 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-05 19:44:33.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400
1150 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-05 19:44:33.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-2.6.39.4/arch/mips/kernel/process.c linux-2.6.39.4/arch/mips/kernel/process.c
1166 --- linux-2.6.39.4/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
1167 +++ linux-2.6.39.4/arch/mips/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-2.6.39.4/arch/mips/kernel/syscall.c linux-2.6.39.4/arch/mips/kernel/syscall.c
1185 --- linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400
1186 +++ linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-08-05 19:44:33.000000000 -0400
1187 @@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
1188 do_color_align = 0;
1189 if (filp || (flags & MAP_SHARED))
1190 do_color_align = 1;
1191 +
1192 +#ifdef CONFIG_PAX_RANDMMAP
1193 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1194 +#endif
1195 +
1196 if (addr) {
1197 if (do_color_align)
1198 addr = COLOUR_ALIGN(addr, pgoff);
1199 else
1200 addr = PAGE_ALIGN(addr);
1201 vmm = find_vma(current->mm, addr);
1202 - if (task_size - len >= addr &&
1203 - (!vmm || addr + len <= vmm->vm_start))
1204 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1205 return addr;
1206 }
1207 addr = current->mm->mmap_base;
1208 @@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
1209 /* At this point: (!vmm || addr < vmm->vm_end). */
1210 if (task_size - len < addr)
1211 return -ENOMEM;
1212 - if (!vmm || addr + len <= vmm->vm_start)
1213 + if (check_heap_stack_gap(vmm, addr, len))
1214 return addr;
1215 addr = vmm->vm_end;
1216 if (do_color_align)
1217 @@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str
1218 mm->unmap_area = arch_unmap_area;
1219 }
1220
1221 -static inline unsigned long brk_rnd(void)
1222 -{
1223 - unsigned long rnd = get_random_int();
1224 -
1225 - rnd = rnd << PAGE_SHIFT;
1226 - /* 8MB for 32bit, 256MB for 64bit */
1227 - if (TASK_IS_32BIT_ADDR)
1228 - rnd = rnd & 0x7ffffful;
1229 - else
1230 - rnd = rnd & 0xffffffful;
1231 -
1232 - return rnd;
1233 -}
1234 -
1235 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1236 -{
1237 - unsigned long base = mm->brk;
1238 - unsigned long ret;
1239 -
1240 - ret = PAGE_ALIGN(base + brk_rnd());
1241 -
1242 - if (ret < mm->brk)
1243 - return mm->brk;
1244 -
1245 - return ret;
1246 -}
1247 -
1248 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1249 unsigned long, prot, unsigned long, flags, unsigned long,
1250 fd, off_t, offset)
1251 diff -urNp linux-2.6.39.4/arch/mips/mm/fault.c linux-2.6.39.4/arch/mips/mm/fault.c
1252 --- linux-2.6.39.4/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1253 +++ linux-2.6.39.4/arch/mips/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1254 @@ -28,6 +28,23 @@
1255 #include <asm/highmem.h> /* For VMALLOC_END */
1256 #include <linux/kdebug.h>
1257
1258 +#ifdef CONFIG_PAX_PAGEEXEC
1259 +void pax_report_insns(void *pc, void *sp)
1260 +{
1261 + unsigned long i;
1262 +
1263 + printk(KERN_ERR "PAX: bytes at PC: ");
1264 + for (i = 0; i < 5; i++) {
1265 + unsigned int c;
1266 + if (get_user(c, (unsigned int *)pc+i))
1267 + printk(KERN_CONT "???????? ");
1268 + else
1269 + printk(KERN_CONT "%08x ", c);
1270 + }
1271 + printk("\n");
1272 +}
1273 +#endif
1274 +
1275 /*
1276 * This routine handles page faults. It determines the address,
1277 * and the problem, and then passes it off to one of the appropriate
1278 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/elf.h linux-2.6.39.4/arch/parisc/include/asm/elf.h
1279 --- linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1280 +++ linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1281 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1282
1283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1284
1285 +#ifdef CONFIG_PAX_ASLR
1286 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1287 +
1288 +#define PAX_DELTA_MMAP_LEN 16
1289 +#define PAX_DELTA_STACK_LEN 16
1290 +#endif
1291 +
1292 /* This yields a mask that user programs can use to figure out what
1293 instruction set this CPU supports. This could be done in user space,
1294 but it's not easy, and we've already done it here. */
1295 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/pgtable.h linux-2.6.39.4/arch/parisc/include/asm/pgtable.h
1296 --- linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1297 +++ linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1298 @@ -207,6 +207,17 @@ struct vm_area_struct;
1299 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1300 #define PAGE_COPY PAGE_EXECREAD
1301 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1302 +
1303 +#ifdef CONFIG_PAX_PAGEEXEC
1304 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1305 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1306 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1307 +#else
1308 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1309 +# define PAGE_COPY_NOEXEC PAGE_COPY
1310 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1311 +#endif
1312 +
1313 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1314 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1315 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1316 diff -urNp linux-2.6.39.4/arch/parisc/kernel/module.c linux-2.6.39.4/arch/parisc/kernel/module.c
1317 --- linux-2.6.39.4/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
1318 +++ linux-2.6.39.4/arch/parisc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
1319 @@ -96,16 +96,38 @@
1320
1321 /* three functions to determine where in the module core
1322 * or init pieces the location is */
1323 +static inline int in_init_rx(struct module *me, void *loc)
1324 +{
1325 + return (loc >= me->module_init_rx &&
1326 + loc < (me->module_init_rx + me->init_size_rx));
1327 +}
1328 +
1329 +static inline int in_init_rw(struct module *me, void *loc)
1330 +{
1331 + return (loc >= me->module_init_rw &&
1332 + loc < (me->module_init_rw + me->init_size_rw));
1333 +}
1334 +
1335 static inline int in_init(struct module *me, void *loc)
1336 {
1337 - return (loc >= me->module_init &&
1338 - loc <= (me->module_init + me->init_size));
1339 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1340 +}
1341 +
1342 +static inline int in_core_rx(struct module *me, void *loc)
1343 +{
1344 + return (loc >= me->module_core_rx &&
1345 + loc < (me->module_core_rx + me->core_size_rx));
1346 +}
1347 +
1348 +static inline int in_core_rw(struct module *me, void *loc)
1349 +{
1350 + return (loc >= me->module_core_rw &&
1351 + loc < (me->module_core_rw + me->core_size_rw));
1352 }
1353
1354 static inline int in_core(struct module *me, void *loc)
1355 {
1356 - return (loc >= me->module_core &&
1357 - loc <= (me->module_core + me->core_size));
1358 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1359 }
1360
1361 static inline int in_local(struct module *me, void *loc)
1362 @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1363 }
1364
1365 /* align things a bit */
1366 - me->core_size = ALIGN(me->core_size, 16);
1367 - me->arch.got_offset = me->core_size;
1368 - me->core_size += gots * sizeof(struct got_entry);
1369 -
1370 - me->core_size = ALIGN(me->core_size, 16);
1371 - me->arch.fdesc_offset = me->core_size;
1372 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1373 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1374 + me->arch.got_offset = me->core_size_rw;
1375 + me->core_size_rw += gots * sizeof(struct got_entry);
1376 +
1377 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1378 + me->arch.fdesc_offset = me->core_size_rw;
1379 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1380
1381 me->arch.got_max = gots;
1382 me->arch.fdesc_max = fdescs;
1383 @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1384
1385 BUG_ON(value == 0);
1386
1387 - got = me->module_core + me->arch.got_offset;
1388 + got = me->module_core_rw + me->arch.got_offset;
1389 for (i = 0; got[i].addr; i++)
1390 if (got[i].addr == value)
1391 goto out;
1392 @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1393 #ifdef CONFIG_64BIT
1394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1395 {
1396 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1397 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1398
1399 if (!value) {
1400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1401 @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1402
1403 /* Create new one */
1404 fdesc->addr = value;
1405 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1406 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1407 return (Elf_Addr)fdesc;
1408 }
1409 #endif /* CONFIG_64BIT */
1410 @@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1411
1412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1413 end = table + sechdrs[me->arch.unwind_section].sh_size;
1414 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1415 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1416
1417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1418 me->arch.unwind_section, table, end, gp);
1419 diff -urNp linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c
1420 --- linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400
1421 +++ linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-08-05 19:44:33.000000000 -0400
1422 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1423 /* At this point: (!vma || addr < vma->vm_end). */
1424 if (TASK_SIZE - len < addr)
1425 return -ENOMEM;
1426 - if (!vma || addr + len <= vma->vm_start)
1427 + if (check_heap_stack_gap(vma, addr, len))
1428 return addr;
1429 addr = vma->vm_end;
1430 }
1431 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1432 /* At this point: (!vma || addr < vma->vm_end). */
1433 if (TASK_SIZE - len < addr)
1434 return -ENOMEM;
1435 - if (!vma || addr + len <= vma->vm_start)
1436 + if (check_heap_stack_gap(vma, addr, len))
1437 return addr;
1438 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1439 if (addr < vma->vm_end) /* handle wraparound */
1440 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1441 if (flags & MAP_FIXED)
1442 return addr;
1443 if (!addr)
1444 - addr = TASK_UNMAPPED_BASE;
1445 + addr = current->mm->mmap_base;
1446
1447 if (filp) {
1448 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1449 diff -urNp linux-2.6.39.4/arch/parisc/kernel/traps.c linux-2.6.39.4/arch/parisc/kernel/traps.c
1450 --- linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
1451 +++ linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
1452 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1453
1454 down_read(&current->mm->mmap_sem);
1455 vma = find_vma(current->mm,regs->iaoq[0]);
1456 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1457 - && (vma->vm_flags & VM_EXEC)) {
1458 -
1459 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1460 fault_address = regs->iaoq[0];
1461 fault_space = regs->iasq[0];
1462
1463 diff -urNp linux-2.6.39.4/arch/parisc/mm/fault.c linux-2.6.39.4/arch/parisc/mm/fault.c
1464 --- linux-2.6.39.4/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1465 +++ linux-2.6.39.4/arch/parisc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1466 @@ -15,6 +15,7 @@
1467 #include <linux/sched.h>
1468 #include <linux/interrupt.h>
1469 #include <linux/module.h>
1470 +#include <linux/unistd.h>
1471
1472 #include <asm/uaccess.h>
1473 #include <asm/traps.h>
1474 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1475 static unsigned long
1476 parisc_acctyp(unsigned long code, unsigned int inst)
1477 {
1478 - if (code == 6 || code == 16)
1479 + if (code == 6 || code == 7 || code == 16)
1480 return VM_EXEC;
1481
1482 switch (inst & 0xf0000000) {
1483 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1484 }
1485 #endif
1486
1487 +#ifdef CONFIG_PAX_PAGEEXEC
1488 +/*
1489 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1490 + *
1491 + * returns 1 when task should be killed
1492 + * 2 when rt_sigreturn trampoline was detected
1493 + * 3 when unpatched PLT trampoline was detected
1494 + */
1495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1496 +{
1497 +
1498 +#ifdef CONFIG_PAX_EMUPLT
1499 + int err;
1500 +
1501 + do { /* PaX: unpatched PLT emulation */
1502 + unsigned int bl, depwi;
1503 +
1504 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1505 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1506 +
1507 + if (err)
1508 + break;
1509 +
1510 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1511 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1512 +
1513 + err = get_user(ldw, (unsigned int *)addr);
1514 + err |= get_user(bv, (unsigned int *)(addr+4));
1515 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1516 +
1517 + if (err)
1518 + break;
1519 +
1520 + if (ldw == 0x0E801096U &&
1521 + bv == 0xEAC0C000U &&
1522 + ldw2 == 0x0E881095U)
1523 + {
1524 + unsigned int resolver, map;
1525 +
1526 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1527 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1528 + if (err)
1529 + break;
1530 +
1531 + regs->gr[20] = instruction_pointer(regs)+8;
1532 + regs->gr[21] = map;
1533 + regs->gr[22] = resolver;
1534 + regs->iaoq[0] = resolver | 3UL;
1535 + regs->iaoq[1] = regs->iaoq[0] + 4;
1536 + return 3;
1537 + }
1538 + }
1539 + } while (0);
1540 +#endif
1541 +
1542 +#ifdef CONFIG_PAX_EMUTRAMP
1543 +
1544 +#ifndef CONFIG_PAX_EMUSIGRT
1545 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1546 + return 1;
1547 +#endif
1548 +
1549 + do { /* PaX: rt_sigreturn emulation */
1550 + unsigned int ldi1, ldi2, bel, nop;
1551 +
1552 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1553 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1554 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1555 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1556 +
1557 + if (err)
1558 + break;
1559 +
1560 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1561 + ldi2 == 0x3414015AU &&
1562 + bel == 0xE4008200U &&
1563 + nop == 0x08000240U)
1564 + {
1565 + regs->gr[25] = (ldi1 & 2) >> 1;
1566 + regs->gr[20] = __NR_rt_sigreturn;
1567 + regs->gr[31] = regs->iaoq[1] + 16;
1568 + regs->sr[0] = regs->iasq[1];
1569 + regs->iaoq[0] = 0x100UL;
1570 + regs->iaoq[1] = regs->iaoq[0] + 4;
1571 + regs->iasq[0] = regs->sr[2];
1572 + regs->iasq[1] = regs->sr[2];
1573 + return 2;
1574 + }
1575 + } while (0);
1576 +#endif
1577 +
1578 + return 1;
1579 +}
1580 +
1581 +void pax_report_insns(void *pc, void *sp)
1582 +{
1583 + unsigned long i;
1584 +
1585 + printk(KERN_ERR "PAX: bytes at PC: ");
1586 + for (i = 0; i < 5; i++) {
1587 + unsigned int c;
1588 + if (get_user(c, (unsigned int *)pc+i))
1589 + printk(KERN_CONT "???????? ");
1590 + else
1591 + printk(KERN_CONT "%08x ", c);
1592 + }
1593 + printk("\n");
1594 +}
1595 +#endif
1596 +
1597 int fixup_exception(struct pt_regs *regs)
1598 {
1599 const struct exception_table_entry *fix;
1600 @@ -192,8 +303,33 @@ good_area:
1601
1602 acc_type = parisc_acctyp(code,regs->iir);
1603
1604 - if ((vma->vm_flags & acc_type) != acc_type)
1605 + if ((vma->vm_flags & acc_type) != acc_type) {
1606 +
1607 +#ifdef CONFIG_PAX_PAGEEXEC
1608 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1609 + (address & ~3UL) == instruction_pointer(regs))
1610 + {
1611 + up_read(&mm->mmap_sem);
1612 + switch (pax_handle_fetch_fault(regs)) {
1613 +
1614 +#ifdef CONFIG_PAX_EMUPLT
1615 + case 3:
1616 + return;
1617 +#endif
1618 +
1619 +#ifdef CONFIG_PAX_EMUTRAMP
1620 + case 2:
1621 + return;
1622 +#endif
1623 +
1624 + }
1625 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1626 + do_group_exit(SIGKILL);
1627 + }
1628 +#endif
1629 +
1630 goto bad_area;
1631 + }
1632
1633 /*
1634 * If for any reason at all we couldn't handle the fault, make
1635 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/elf.h linux-2.6.39.4/arch/powerpc/include/asm/elf.h
1636 --- linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1637 +++ linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1638 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1639 the loader. We need to make sure that it is out of the way of the program
1640 that it will "exec", and that there is sufficient room for the brk. */
1641
1642 -extern unsigned long randomize_et_dyn(unsigned long base);
1643 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1644 +#define ELF_ET_DYN_BASE (0x20000000)
1645 +
1646 +#ifdef CONFIG_PAX_ASLR
1647 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1648 +
1649 +#ifdef __powerpc64__
1650 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1651 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1652 +#else
1653 +#define PAX_DELTA_MMAP_LEN 15
1654 +#define PAX_DELTA_STACK_LEN 15
1655 +#endif
1656 +#endif
1657
1658 /*
1659 * Our registers are always unsigned longs, whether we're a 32 bit
1660 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1661 (0x7ff >> (PAGE_SHIFT - 12)) : \
1662 (0x3ffff >> (PAGE_SHIFT - 12)))
1663
1664 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1665 -#define arch_randomize_brk arch_randomize_brk
1666 -
1667 #endif /* __KERNEL__ */
1668
1669 /*
1670 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h
1671 --- linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
1672 +++ linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
1673 @@ -27,6 +27,7 @@ enum km_type {
1674 KM_PPC_SYNC_PAGE,
1675 KM_PPC_SYNC_ICACHE,
1676 KM_KDB,
1677 + KM_CLEARPAGE,
1678 KM_TYPE_NR
1679 };
1680
1681 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page_64.h linux-2.6.39.4/arch/powerpc/include/asm/page_64.h
1682 --- linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400
1683 +++ linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-08-05 19:44:33.000000000 -0400
1684 @@ -172,15 +172,18 @@ do { \
1685 * stack by default, so in the absence of a PT_GNU_STACK program header
1686 * we turn execute permission off.
1687 */
1688 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1689 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1690 +#define VM_STACK_DEFAULT_FLAGS32 \
1691 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1692 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1693
1694 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1695 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1696
1697 +#ifndef CONFIG_PAX_PAGEEXEC
1698 #define VM_STACK_DEFAULT_FLAGS \
1699 (is_32bit_task() ? \
1700 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1701 +#endif
1702
1703 #include <asm-generic/getorder.h>
1704
1705 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page.h linux-2.6.39.4/arch/powerpc/include/asm/page.h
1706 --- linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1707 +++ linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1708 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1709 * and needs to be executable. This means the whole heap ends
1710 * up being executable.
1711 */
1712 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1713 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1714 +#define VM_DATA_DEFAULT_FLAGS32 \
1715 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1716 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1717
1718 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1719 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1720 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1721 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1722 #endif
1723
1724 +#define ktla_ktva(addr) (addr)
1725 +#define ktva_ktla(addr) (addr)
1726 +
1727 #ifndef __ASSEMBLY__
1728
1729 #undef STRICT_MM_TYPECHECKS
1730 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h
1731 --- linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1732 +++ linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1733 @@ -2,6 +2,7 @@
1734 #define _ASM_POWERPC_PGTABLE_H
1735 #ifdef __KERNEL__
1736
1737 +#include <linux/const.h>
1738 #ifndef __ASSEMBLY__
1739 #include <asm/processor.h> /* For TASK_SIZE */
1740 #include <asm/mmu.h>
1741 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h
1742 --- linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400
1743 +++ linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-05 19:44:33.000000000 -0400
1744 @@ -21,6 +21,7 @@
1745 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1746 #define _PAGE_USER 0x004 /* usermode access allowed */
1747 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1748 +#define _PAGE_EXEC _PAGE_GUARDED
1749 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1750 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1751 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1752 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/reg.h linux-2.6.39.4/arch/powerpc/include/asm/reg.h
1753 --- linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400
1754 +++ linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-08-05 19:44:33.000000000 -0400
1755 @@ -201,6 +201,7 @@
1756 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1757 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1758 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1759 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1760 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1761 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1762 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1763 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/system.h linux-2.6.39.4/arch/powerpc/include/asm/system.h
1764 --- linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1765 +++ linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1766 @@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1767 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1768 #endif
1769
1770 -extern unsigned long arch_align_stack(unsigned long sp);
1771 +#define arch_align_stack(x) ((x) & ~0xfUL)
1772
1773 /* Used in very early kernel initialization. */
1774 extern unsigned long reloc_offset(void);
1775 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h
1776 --- linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
1777 +++ linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
1778 @@ -13,6 +13,8 @@
1779 #define VERIFY_READ 0
1780 #define VERIFY_WRITE 1
1781
1782 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1783 +
1784 /*
1785 * The fs value determines whether argument validity checking should be
1786 * performed or not. If get_fs() == USER_DS, checking is performed, with
1787 @@ -327,52 +329,6 @@ do { \
1788 extern unsigned long __copy_tofrom_user(void __user *to,
1789 const void __user *from, unsigned long size);
1790
1791 -#ifndef __powerpc64__
1792 -
1793 -static inline unsigned long copy_from_user(void *to,
1794 - const void __user *from, unsigned long n)
1795 -{
1796 - unsigned long over;
1797 -
1798 - if (access_ok(VERIFY_READ, from, n))
1799 - return __copy_tofrom_user((__force void __user *)to, from, n);
1800 - if ((unsigned long)from < TASK_SIZE) {
1801 - over = (unsigned long)from + n - TASK_SIZE;
1802 - return __copy_tofrom_user((__force void __user *)to, from,
1803 - n - over) + over;
1804 - }
1805 - return n;
1806 -}
1807 -
1808 -static inline unsigned long copy_to_user(void __user *to,
1809 - const void *from, unsigned long n)
1810 -{
1811 - unsigned long over;
1812 -
1813 - if (access_ok(VERIFY_WRITE, to, n))
1814 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1815 - if ((unsigned long)to < TASK_SIZE) {
1816 - over = (unsigned long)to + n - TASK_SIZE;
1817 - return __copy_tofrom_user(to, (__force void __user *)from,
1818 - n - over) + over;
1819 - }
1820 - return n;
1821 -}
1822 -
1823 -#else /* __powerpc64__ */
1824 -
1825 -#define __copy_in_user(to, from, size) \
1826 - __copy_tofrom_user((to), (from), (size))
1827 -
1828 -extern unsigned long copy_from_user(void *to, const void __user *from,
1829 - unsigned long n);
1830 -extern unsigned long copy_to_user(void __user *to, const void *from,
1831 - unsigned long n);
1832 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1833 - unsigned long n);
1834 -
1835 -#endif /* __powerpc64__ */
1836 -
1837 static inline unsigned long __copy_from_user_inatomic(void *to,
1838 const void __user *from, unsigned long n)
1839 {
1840 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1841 if (ret == 0)
1842 return 0;
1843 }
1844 +
1845 + if (!__builtin_constant_p(n))
1846 + check_object_size(to, n, false);
1847 +
1848 return __copy_tofrom_user((__force void __user *)to, from, n);
1849 }
1850
1851 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1852 if (ret == 0)
1853 return 0;
1854 }
1855 +
1856 + if (!__builtin_constant_p(n))
1857 + check_object_size(from, n, true);
1858 +
1859 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1860 }
1861
1862 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1863 return __copy_to_user_inatomic(to, from, size);
1864 }
1865
1866 +#ifndef __powerpc64__
1867 +
1868 +static inline unsigned long __must_check copy_from_user(void *to,
1869 + const void __user *from, unsigned long n)
1870 +{
1871 + unsigned long over;
1872 +
1873 + if ((long)n < 0)
1874 + return n;
1875 +
1876 + if (access_ok(VERIFY_READ, from, n)) {
1877 + if (!__builtin_constant_p(n))
1878 + check_object_size(to, n, false);
1879 + return __copy_tofrom_user((__force void __user *)to, from, n);
1880 + }
1881 + if ((unsigned long)from < TASK_SIZE) {
1882 + over = (unsigned long)from + n - TASK_SIZE;
1883 + if (!__builtin_constant_p(n - over))
1884 + check_object_size(to, n - over, false);
1885 + return __copy_tofrom_user((__force void __user *)to, from,
1886 + n - over) + over;
1887 + }
1888 + return n;
1889 +}
1890 +
1891 +static inline unsigned long __must_check copy_to_user(void __user *to,
1892 + const void *from, unsigned long n)
1893 +{
1894 + unsigned long over;
1895 +
1896 + if ((long)n < 0)
1897 + return n;
1898 +
1899 + if (access_ok(VERIFY_WRITE, to, n)) {
1900 + if (!__builtin_constant_p(n))
1901 + check_object_size(from, n, true);
1902 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1903 + }
1904 + if ((unsigned long)to < TASK_SIZE) {
1905 + over = (unsigned long)to + n - TASK_SIZE;
1906 + if (!__builtin_constant_p(n))
1907 + check_object_size(from, n - over, true);
1908 + return __copy_tofrom_user(to, (__force void __user *)from,
1909 + n - over) + over;
1910 + }
1911 + return n;
1912 +}
1913 +
1914 +#else /* __powerpc64__ */
1915 +
1916 +#define __copy_in_user(to, from, size) \
1917 + __copy_tofrom_user((to), (from), (size))
1918 +
1919 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1920 +{
1921 + if ((long)n < 0 || n > INT_MAX)
1922 + return n;
1923 +
1924 + if (!__builtin_constant_p(n))
1925 + check_object_size(to, n, false);
1926 +
1927 + if (likely(access_ok(VERIFY_READ, from, n)))
1928 + n = __copy_from_user(to, from, n);
1929 + else
1930 + memset(to, 0, n);
1931 + return n;
1932 +}
1933 +
1934 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1935 +{
1936 + if ((long)n < 0 || n > INT_MAX)
1937 + return n;
1938 +
1939 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1940 + if (!__builtin_constant_p(n))
1941 + check_object_size(from, n, true);
1942 + n = __copy_to_user(to, from, n);
1943 + }
1944 + return n;
1945 +}
1946 +
1947 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1948 + unsigned long n);
1949 +
1950 +#endif /* __powerpc64__ */
1951 +
1952 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1953
1954 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1955 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S
1956 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400
1957 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-05 19:44:33.000000000 -0400
1958 @@ -495,6 +495,7 @@ storage_fault_common:
1959 std r14,_DAR(r1)
1960 std r15,_DSISR(r1)
1961 addi r3,r1,STACK_FRAME_OVERHEAD
1962 + bl .save_nvgprs
1963 mr r4,r14
1964 mr r5,r15
1965 ld r14,PACA_EXGEN+EX_R14(r13)
1966 @@ -504,8 +505,7 @@ storage_fault_common:
1967 cmpdi r3,0
1968 bne- 1f
1969 b .ret_from_except_lite
1970 -1: bl .save_nvgprs
1971 - mr r5,r3
1972 +1: mr r5,r3
1973 addi r3,r1,STACK_FRAME_OVERHEAD
1974 ld r4,_DAR(r1)
1975 bl .bad_page_fault
1976 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S
1977 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400
1978 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-05 19:44:33.000000000 -0400
1979 @@ -848,10 +848,10 @@ handle_page_fault:
1980 11: ld r4,_DAR(r1)
1981 ld r5,_DSISR(r1)
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 + bl .save_nvgprs
1984 bl .do_page_fault
1985 cmpdi r3,0
1986 beq+ 13f
1987 - bl .save_nvgprs
1988 mr r5,r3
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990 lwz r4,_DAR(r1)
1991 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module_32.c linux-2.6.39.4/arch/powerpc/kernel/module_32.c
1992 --- linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400
1993 +++ linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-08-05 19:44:33.000000000 -0400
1994 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
1995 me->arch.core_plt_section = i;
1996 }
1997 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
1998 - printk("Module doesn't contain .plt or .init.plt sections.\n");
1999 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2000 return -ENOEXEC;
2001 }
2002
2003 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2004
2005 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2006 /* Init, or core PLT? */
2007 - if (location >= mod->module_core
2008 - && location < mod->module_core + mod->core_size)
2009 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2010 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2011 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2012 - else
2013 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2014 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2015 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2016 + else {
2017 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2018 + return ~0UL;
2019 + }
2020
2021 /* Find this entry, or if that fails, the next avail. entry */
2022 while (entry->jump[0]) {
2023 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module.c linux-2.6.39.4/arch/powerpc/kernel/module.c
2024 --- linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2025 +++ linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2026 @@ -31,11 +31,24 @@
2027
2028 LIST_HEAD(module_bug_list);
2029
2030 +#ifdef CONFIG_PAX_KERNEXEC
2031 void *module_alloc(unsigned long size)
2032 {
2033 if (size == 0)
2034 return NULL;
2035
2036 + return vmalloc(size);
2037 +}
2038 +
2039 +void *module_alloc_exec(unsigned long size)
2040 +#else
2041 +void *module_alloc(unsigned long size)
2042 +#endif
2043 +
2044 +{
2045 + if (size == 0)
2046 + return NULL;
2047 +
2048 return vmalloc_exec(size);
2049 }
2050
2051 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2052 vfree(module_region);
2053 }
2054
2055 +#ifdef CONFIG_PAX_KERNEXEC
2056 +void module_free_exec(struct module *mod, void *module_region)
2057 +{
2058 + module_free(mod, module_region);
2059 +}
2060 +#endif
2061 +
2062 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2063 const Elf_Shdr *sechdrs,
2064 const char *name)
2065 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/process.c linux-2.6.39.4/arch/powerpc/kernel/process.c
2066 --- linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2067 +++ linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2068 @@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs)
2069 * Lookup NIP late so we have the best change of getting the
2070 * above info out without failing
2071 */
2072 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2073 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2074 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2075 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2076 #endif
2077 show_stack(current, (unsigned long *) regs->gpr[1]);
2078 if (!user_mode(regs))
2079 @@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk,
2080 newsp = stack[0];
2081 ip = stack[STACK_FRAME_LR_SAVE];
2082 if (!firstframe || ip != lr) {
2083 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2084 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2085 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2086 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2087 - printk(" (%pS)",
2088 + printk(" (%pA)",
2089 (void *)current->ret_stack[curr_frame].ret);
2090 curr_frame--;
2091 }
2092 @@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk,
2093 struct pt_regs *regs = (struct pt_regs *)
2094 (sp + STACK_FRAME_OVERHEAD);
2095 lr = regs->link;
2096 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2097 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2098 regs->trap, (void *)regs->nip, (void *)lr);
2099 firstframe = 1;
2100 }
2101 @@ -1244,58 +1244,3 @@ void thread_info_cache_init(void)
2102 }
2103
2104 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2105 -
2106 -unsigned long arch_align_stack(unsigned long sp)
2107 -{
2108 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2109 - sp -= get_random_int() & ~PAGE_MASK;
2110 - return sp & ~0xf;
2111 -}
2112 -
2113 -static inline unsigned long brk_rnd(void)
2114 -{
2115 - unsigned long rnd = 0;
2116 -
2117 - /* 8MB for 32bit, 1GB for 64bit */
2118 - if (is_32bit_task())
2119 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2120 - else
2121 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2122 -
2123 - return rnd << PAGE_SHIFT;
2124 -}
2125 -
2126 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2127 -{
2128 - unsigned long base = mm->brk;
2129 - unsigned long ret;
2130 -
2131 -#ifdef CONFIG_PPC_STD_MMU_64
2132 - /*
2133 - * If we are using 1TB segments and we are allowed to randomise
2134 - * the heap, we can put it above 1TB so it is backed by a 1TB
2135 - * segment. Otherwise the heap will be in the bottom 1TB
2136 - * which always uses 256MB segments and this may result in a
2137 - * performance penalty.
2138 - */
2139 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2140 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2141 -#endif
2142 -
2143 - ret = PAGE_ALIGN(base + brk_rnd());
2144 -
2145 - if (ret < mm->brk)
2146 - return mm->brk;
2147 -
2148 - return ret;
2149 -}
2150 -
2151 -unsigned long randomize_et_dyn(unsigned long base)
2152 -{
2153 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2154 -
2155 - if (ret < base)
2156 - return base;
2157 -
2158 - return ret;
2159 -}
2160 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_32.c linux-2.6.39.4/arch/powerpc/kernel/signal_32.c
2161 --- linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400
2162 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-08-05 19:44:33.000000000 -0400
2163 @@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
2164 /* Save user registers on the stack */
2165 frame = &rt_sf->uc.uc_mcontext;
2166 addr = frame;
2167 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2168 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2169 if (save_user_regs(regs, frame, 0, 1))
2170 goto badframe;
2171 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2172 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_64.c linux-2.6.39.4/arch/powerpc/kernel/signal_64.c
2173 --- linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400
2174 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-08-05 19:44:33.000000000 -0400
2175 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2176 current->thread.fpscr.val = 0;
2177
2178 /* Set up to return from userspace. */
2179 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2180 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2181 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2182 } else {
2183 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2184 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/traps.c linux-2.6.39.4/arch/powerpc/kernel/traps.c
2185 --- linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
2186 +++ linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
2187 @@ -96,6 +96,8 @@ static void pmac_backlight_unblank(void)
2188 static inline void pmac_backlight_unblank(void) { }
2189 #endif
2190
2191 +extern void gr_handle_kernel_exploit(void);
2192 +
2193 int die(const char *str, struct pt_regs *regs, long err)
2194 {
2195 static struct {
2196 @@ -170,6 +172,8 @@ int die(const char *str, struct pt_regs
2197 if (panic_on_oops)
2198 panic("Fatal exception");
2199
2200 + gr_handle_kernel_exploit();
2201 +
2202 oops_exit();
2203 do_exit(err);
2204
2205 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/vdso.c linux-2.6.39.4/arch/powerpc/kernel/vdso.c
2206 --- linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400
2207 +++ linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-08-05 19:44:33.000000000 -0400
2208 @@ -36,6 +36,7 @@
2209 #include <asm/firmware.h>
2210 #include <asm/vdso.h>
2211 #include <asm/vdso_datapage.h>
2212 +#include <asm/mman.h>
2213
2214 #include "setup.h"
2215
2216 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2217 vdso_base = VDSO32_MBASE;
2218 #endif
2219
2220 - current->mm->context.vdso_base = 0;
2221 + current->mm->context.vdso_base = ~0UL;
2222
2223 /* vDSO has a problem and was disabled, just don't "enable" it for the
2224 * process
2225 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = get_unmapped_area(NULL, vdso_base,
2227 (vdso_pages << PAGE_SHIFT) +
2228 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2229 - 0, 0);
2230 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2231 if (IS_ERR_VALUE(vdso_base)) {
2232 rc = vdso_base;
2233 goto fail_mmapsem;
2234 diff -urNp linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c
2235 --- linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
2236 +++ linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-08-05 19:44:33.000000000 -0400
2237 @@ -9,22 +9,6 @@
2238 #include <linux/module.h>
2239 #include <asm/uaccess.h>
2240
2241 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2242 -{
2243 - if (likely(access_ok(VERIFY_READ, from, n)))
2244 - n = __copy_from_user(to, from, n);
2245 - else
2246 - memset(to, 0, n);
2247 - return n;
2248 -}
2249 -
2250 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2253 - n = __copy_to_user(to, from, n);
2254 - return n;
2255 -}
2256 -
2257 unsigned long copy_in_user(void __user *to, const void __user *from,
2258 unsigned long n)
2259 {
2260 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2261 return n;
2262 }
2263
2264 -EXPORT_SYMBOL(copy_from_user);
2265 -EXPORT_SYMBOL(copy_to_user);
2266 EXPORT_SYMBOL(copy_in_user);
2267
2268 diff -urNp linux-2.6.39.4/arch/powerpc/mm/fault.c linux-2.6.39.4/arch/powerpc/mm/fault.c
2269 --- linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
2270 +++ linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
2271 @@ -31,6 +31,10 @@
2272 #include <linux/kdebug.h>
2273 #include <linux/perf_event.h>
2274 #include <linux/magic.h>
2275 +#include <linux/slab.h>
2276 +#include <linux/pagemap.h>
2277 +#include <linux/compiler.h>
2278 +#include <linux/unistd.h>
2279
2280 #include <asm/firmware.h>
2281 #include <asm/page.h>
2282 @@ -42,6 +46,7 @@
2283 #include <asm/tlbflush.h>
2284 #include <asm/siginfo.h>
2285 #include <mm/mmu_decl.h>
2286 +#include <asm/ptrace.h>
2287
2288 #ifdef CONFIG_KPROBES
2289 static inline int notify_page_fault(struct pt_regs *regs)
2290 @@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
2291 }
2292 #endif
2293
2294 +#ifdef CONFIG_PAX_PAGEEXEC
2295 +/*
2296 + * PaX: decide what to do with offenders (regs->nip = fault address)
2297 + *
2298 + * returns 1 when task should be killed
2299 + */
2300 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2301 +{
2302 + return 1;
2303 +}
2304 +
2305 +void pax_report_insns(void *pc, void *sp)
2306 +{
2307 + unsigned long i;
2308 +
2309 + printk(KERN_ERR "PAX: bytes at PC: ");
2310 + for (i = 0; i < 5; i++) {
2311 + unsigned int c;
2312 + if (get_user(c, (unsigned int __user *)pc+i))
2313 + printk(KERN_CONT "???????? ");
2314 + else
2315 + printk(KERN_CONT "%08x ", c);
2316 + }
2317 + printk("\n");
2318 +}
2319 +#endif
2320 +
2321 /*
2322 * Check whether the instruction at regs->nip is a store using
2323 * an update addressing form which will update r1.
2324 @@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
2325 * indicate errors in DSISR but can validly be set in SRR1.
2326 */
2327 if (trap == 0x400)
2328 - error_code &= 0x48200000;
2329 + error_code &= 0x58200000;
2330 else
2331 is_write = error_code & DSISR_ISSTORE;
2332 #else
2333 @@ -258,7 +290,7 @@ good_area:
2334 * "undefined". Of those that can be set, this is the only
2335 * one which seems bad.
2336 */
2337 - if (error_code & 0x10000000)
2338 + if (error_code & DSISR_GUARDED)
2339 /* Guarded storage error. */
2340 goto bad_area;
2341 #endif /* CONFIG_8xx */
2342 @@ -273,7 +305,7 @@ good_area:
2343 * processors use the same I/D cache coherency mechanism
2344 * as embedded.
2345 */
2346 - if (error_code & DSISR_PROTFAULT)
2347 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2348 goto bad_area;
2349 #endif /* CONFIG_PPC_STD_MMU */
2350
2351 @@ -342,6 +374,23 @@ bad_area:
2352 bad_area_nosemaphore:
2353 /* User mode accesses cause a SIGSEGV */
2354 if (user_mode(regs)) {
2355 +
2356 +#ifdef CONFIG_PAX_PAGEEXEC
2357 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2358 +#ifdef CONFIG_PPC_STD_MMU
2359 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2360 +#else
2361 + if (is_exec && regs->nip == address) {
2362 +#endif
2363 + switch (pax_handle_fetch_fault(regs)) {
2364 + }
2365 +
2366 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2367 + do_group_exit(SIGKILL);
2368 + }
2369 + }
2370 +#endif
2371 +
2372 _exception(SIGSEGV, regs, code, address);
2373 return 0;
2374 }
2375 diff -urNp linux-2.6.39.4/arch/powerpc/mm/mmap_64.c linux-2.6.39.4/arch/powerpc/mm/mmap_64.c
2376 --- linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400
2377 +++ linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-08-05 19:44:33.000000000 -0400
2378 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2379 */
2380 if (mmap_is_legacy()) {
2381 mm->mmap_base = TASK_UNMAPPED_BASE;
2382 +
2383 +#ifdef CONFIG_PAX_RANDMMAP
2384 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2385 + mm->mmap_base += mm->delta_mmap;
2386 +#endif
2387 +
2388 mm->get_unmapped_area = arch_get_unmapped_area;
2389 mm->unmap_area = arch_unmap_area;
2390 } else {
2391 mm->mmap_base = mmap_base();
2392 +
2393 +#ifdef CONFIG_PAX_RANDMMAP
2394 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2395 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2396 +#endif
2397 +
2398 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2399 mm->unmap_area = arch_unmap_area_topdown;
2400 }
2401 diff -urNp linux-2.6.39.4/arch/powerpc/mm/slice.c linux-2.6.39.4/arch/powerpc/mm/slice.c
2402 --- linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400
2403 +++ linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-08-05 19:44:33.000000000 -0400
2404 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2405 if ((mm->task_size - len) < addr)
2406 return 0;
2407 vma = find_vma(mm, addr);
2408 - return (!vma || (addr + len) <= vma->vm_start);
2409 + return check_heap_stack_gap(vma, addr, len);
2410 }
2411
2412 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2413 @@ -256,7 +256,7 @@ full_search:
2414 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2415 continue;
2416 }
2417 - if (!vma || addr + len <= vma->vm_start) {
2418 + if (check_heap_stack_gap(vma, addr, len)) {
2419 /*
2420 * Remember the place where we stopped the search:
2421 */
2422 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2423 }
2424 }
2425
2426 - addr = mm->mmap_base;
2427 - while (addr > len) {
2428 + if (mm->mmap_base < len)
2429 + addr = -ENOMEM;
2430 + else
2431 + addr = mm->mmap_base - len;
2432 +
2433 + while (!IS_ERR_VALUE(addr)) {
2434 /* Go down by chunk size */
2435 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2436 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2437
2438 /* Check for hit with different page size */
2439 mask = slice_range_to_mask(addr, len);
2440 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2441 * return with success:
2442 */
2443 vma = find_vma(mm, addr);
2444 - if (!vma || (addr + len) <= vma->vm_start) {
2445 + if (check_heap_stack_gap(vma, addr, len)) {
2446 /* remember the address as a hint for next time */
2447 if (use_cache)
2448 mm->free_area_cache = addr;
2449 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2450 mm->cached_hole_size = vma->vm_start - addr;
2451
2452 /* try just below the current vma->vm_start */
2453 - addr = vma->vm_start;
2454 + addr = skip_heap_stack_gap(vma, len);
2455 }
2456
2457 /*
2458 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2459 if (fixed && addr > (mm->task_size - len))
2460 return -EINVAL;
2461
2462 +#ifdef CONFIG_PAX_RANDMMAP
2463 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2464 + addr = 0;
2465 +#endif
2466 +
2467 /* If hint, make sure it matches our alignment restrictions */
2468 if (!fixed && addr) {
2469 addr = _ALIGN_UP(addr, 1ul << pshift);
2470 diff -urNp linux-2.6.39.4/arch/s390/include/asm/elf.h linux-2.6.39.4/arch/s390/include/asm/elf.h
2471 --- linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
2472 +++ linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
2473 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2474 the loader. We need to make sure that it is out of the way of the program
2475 that it will "exec", and that there is sufficient room for the brk. */
2476
2477 -extern unsigned long randomize_et_dyn(unsigned long base);
2478 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2479 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2480 +
2481 +#ifdef CONFIG_PAX_ASLR
2482 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2483 +
2484 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2485 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2486 +#endif
2487
2488 /* This yields a mask that user programs can use to figure out what
2489 instruction set this CPU supports. */
2490 @@ -222,7 +228,4 @@ struct linux_binprm;
2491 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2492 int arch_setup_additional_pages(struct linux_binprm *, int);
2493
2494 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2495 -#define arch_randomize_brk arch_randomize_brk
2496 -
2497 #endif
2498 diff -urNp linux-2.6.39.4/arch/s390/include/asm/system.h linux-2.6.39.4/arch/s390/include/asm/system.h
2499 --- linux-2.6.39.4/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2500 +++ linux-2.6.39.4/arch/s390/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2501 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2502 extern void (*_machine_halt)(void);
2503 extern void (*_machine_power_off)(void);
2504
2505 -extern unsigned long arch_align_stack(unsigned long sp);
2506 +#define arch_align_stack(x) ((x) & ~0xfUL)
2507
2508 static inline int tprot(unsigned long addr)
2509 {
2510 diff -urNp linux-2.6.39.4/arch/s390/include/asm/uaccess.h linux-2.6.39.4/arch/s390/include/asm/uaccess.h
2511 --- linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
2512 +++ linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
2513 @@ -234,6 +234,10 @@ static inline unsigned long __must_check
2514 copy_to_user(void __user *to, const void *from, unsigned long n)
2515 {
2516 might_fault();
2517 +
2518 + if ((long)n < 0)
2519 + return n;
2520 +
2521 if (access_ok(VERIFY_WRITE, to, n))
2522 n = __copy_to_user(to, from, n);
2523 return n;
2524 @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
2525 static inline unsigned long __must_check
2526 __copy_from_user(void *to, const void __user *from, unsigned long n)
2527 {
2528 + if ((long)n < 0)
2529 + return n;
2530 +
2531 if (__builtin_constant_p(n) && (n <= 256))
2532 return uaccess.copy_from_user_small(n, from, to);
2533 else
2534 @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
2535 unsigned int sz = __compiletime_object_size(to);
2536
2537 might_fault();
2538 +
2539 + if ((long)n < 0)
2540 + return n;
2541 +
2542 if (unlikely(sz != -1 && sz < n)) {
2543 copy_from_user_overflow();
2544 return n;
2545 diff -urNp linux-2.6.39.4/arch/s390/Kconfig linux-2.6.39.4/arch/s390/Kconfig
2546 --- linux-2.6.39.4/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400
2547 +++ linux-2.6.39.4/arch/s390/Kconfig 2011-08-05 19:44:33.000000000 -0400
2548 @@ -234,11 +234,9 @@ config S390_EXEC_PROTECT
2549 prompt "Data execute protection"
2550 help
2551 This option allows to enable a buffer overflow protection for user
2552 - space programs and it also selects the addressing mode option above.
2553 - The kernel parameter noexec=on will enable this feature and also
2554 - switch the addressing modes, default is disabled. Enabling this (via
2555 - kernel parameter) on machines earlier than IBM System z9 this will
2556 - reduce system performance.
2557 + space programs.
2558 + Enabling this (via kernel parameter) on machines earlier than IBM
2559 + System z9 this will reduce system performance.
2560
2561 comment "Code generation options"
2562
2563 diff -urNp linux-2.6.39.4/arch/s390/kernel/module.c linux-2.6.39.4/arch/s390/kernel/module.c
2564 --- linux-2.6.39.4/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2565 +++ linux-2.6.39.4/arch/s390/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2566 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2567
2568 /* Increase core size by size of got & plt and set start
2569 offsets for got and plt. */
2570 - me->core_size = ALIGN(me->core_size, 4);
2571 - me->arch.got_offset = me->core_size;
2572 - me->core_size += me->arch.got_size;
2573 - me->arch.plt_offset = me->core_size;
2574 - me->core_size += me->arch.plt_size;
2575 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2576 + me->arch.got_offset = me->core_size_rw;
2577 + me->core_size_rw += me->arch.got_size;
2578 + me->arch.plt_offset = me->core_size_rx;
2579 + me->core_size_rx += me->arch.plt_size;
2580 return 0;
2581 }
2582
2583 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 if (info->got_initialized == 0) {
2585 Elf_Addr *gotent;
2586
2587 - gotent = me->module_core + me->arch.got_offset +
2588 + gotent = me->module_core_rw + me->arch.got_offset +
2589 info->got_offset;
2590 *gotent = val;
2591 info->got_initialized = 1;
2592 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 else if (r_type == R_390_GOTENT ||
2594 r_type == R_390_GOTPLTENT)
2595 *(unsigned int *) loc =
2596 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2597 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2598 else if (r_type == R_390_GOT64 ||
2599 r_type == R_390_GOTPLT64)
2600 *(unsigned long *) loc = val;
2601 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2603 if (info->plt_initialized == 0) {
2604 unsigned int *ip;
2605 - ip = me->module_core + me->arch.plt_offset +
2606 + ip = me->module_core_rx + me->arch.plt_offset +
2607 info->plt_offset;
2608 #ifndef CONFIG_64BIT
2609 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2610 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 val - loc + 0xffffUL < 0x1ffffeUL) ||
2612 (r_type == R_390_PLT32DBL &&
2613 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2614 - val = (Elf_Addr) me->module_core +
2615 + val = (Elf_Addr) me->module_core_rx +
2616 me->arch.plt_offset +
2617 info->plt_offset;
2618 val += rela->r_addend - loc;
2619 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2621 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2622 val = val + rela->r_addend -
2623 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2624 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2625 if (r_type == R_390_GOTOFF16)
2626 *(unsigned short *) loc = val;
2627 else if (r_type == R_390_GOTOFF32)
2628 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2629 break;
2630 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2631 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2632 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2633 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2634 rela->r_addend - loc;
2635 if (r_type == R_390_GOTPC)
2636 *(unsigned int *) loc = val;
2637 diff -urNp linux-2.6.39.4/arch/s390/kernel/process.c linux-2.6.39.4/arch/s390/kernel/process.c
2638 --- linux-2.6.39.4/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2639 +++ linux-2.6.39.4/arch/s390/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2640 @@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru
2641 }
2642 return 0;
2643 }
2644 -
2645 -unsigned long arch_align_stack(unsigned long sp)
2646 -{
2647 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2648 - sp -= get_random_int() & ~PAGE_MASK;
2649 - return sp & ~0xf;
2650 -}
2651 -
2652 -static inline unsigned long brk_rnd(void)
2653 -{
2654 - /* 8MB for 32bit, 1GB for 64bit */
2655 - if (is_32bit_task())
2656 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2657 - else
2658 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2659 -}
2660 -
2661 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2664 -
2665 - if (ret < mm->brk)
2666 - return mm->brk;
2667 - return ret;
2668 -}
2669 -
2670 -unsigned long randomize_et_dyn(unsigned long base)
2671 -{
2672 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2673 -
2674 - if (!(current->flags & PF_RANDOMIZE))
2675 - return base;
2676 - if (ret < base)
2677 - return base;
2678 - return ret;
2679 -}
2680 diff -urNp linux-2.6.39.4/arch/s390/kernel/setup.c linux-2.6.39.4/arch/s390/kernel/setup.c
2681 --- linux-2.6.39.4/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400
2682 +++ linux-2.6.39.4/arch/s390/kernel/setup.c 2011-08-05 19:44:33.000000000 -0400
2683 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2684 }
2685 early_param("mem", early_parse_mem);
2686
2687 -unsigned int user_mode = HOME_SPACE_MODE;
2688 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2689 EXPORT_SYMBOL_GPL(user_mode);
2690
2691 static int set_amode_and_uaccess(unsigned long user_amode,
2692 @@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne
2693 }
2694 }
2695
2696 -/*
2697 - * Switch kernel/user addressing modes?
2698 - */
2699 -static int __init early_parse_switch_amode(char *p)
2700 -{
2701 - if (user_mode != SECONDARY_SPACE_MODE)
2702 - user_mode = PRIMARY_SPACE_MODE;
2703 - return 0;
2704 -}
2705 -early_param("switch_amode", early_parse_switch_amode);
2706 -
2707 static int __init early_parse_user_mode(char *p)
2708 {
2709 if (p && strcmp(p, "primary") == 0)
2710 @@ -327,20 +316,6 @@ static int __init early_parse_user_mode(
2711 }
2712 early_param("user_mode", early_parse_user_mode);
2713
2714 -#ifdef CONFIG_S390_EXEC_PROTECT
2715 -/*
2716 - * Enable execute protection?
2717 - */
2718 -static int __init early_parse_noexec(char *p)
2719 -{
2720 - if (!strncmp(p, "off", 3))
2721 - return 0;
2722 - user_mode = SECONDARY_SPACE_MODE;
2723 - return 0;
2724 -}
2725 -early_param("noexec", early_parse_noexec);
2726 -#endif /* CONFIG_S390_EXEC_PROTECT */
2727 -
2728 static void setup_addressing_mode(void)
2729 {
2730 if (user_mode == SECONDARY_SPACE_MODE) {
2731 diff -urNp linux-2.6.39.4/arch/s390/mm/mmap.c linux-2.6.39.4/arch/s390/mm/mmap.c
2732 --- linux-2.6.39.4/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2733 +++ linux-2.6.39.4/arch/s390/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2734 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2735 */
2736 if (mmap_is_legacy()) {
2737 mm->mmap_base = TASK_UNMAPPED_BASE;
2738 +
2739 +#ifdef CONFIG_PAX_RANDMMAP
2740 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2741 + mm->mmap_base += mm->delta_mmap;
2742 +#endif
2743 +
2744 mm->get_unmapped_area = arch_get_unmapped_area;
2745 mm->unmap_area = arch_unmap_area;
2746 } else {
2747 mm->mmap_base = mmap_base();
2748 +
2749 +#ifdef CONFIG_PAX_RANDMMAP
2750 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2751 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2752 +#endif
2753 +
2754 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2755 mm->unmap_area = arch_unmap_area_topdown;
2756 }
2757 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2758 */
2759 if (mmap_is_legacy()) {
2760 mm->mmap_base = TASK_UNMAPPED_BASE;
2761 +
2762 +#ifdef CONFIG_PAX_RANDMMAP
2763 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2764 + mm->mmap_base += mm->delta_mmap;
2765 +#endif
2766 +
2767 mm->get_unmapped_area = s390_get_unmapped_area;
2768 mm->unmap_area = arch_unmap_area;
2769 } else {
2770 mm->mmap_base = mmap_base();
2771 +
2772 +#ifdef CONFIG_PAX_RANDMMAP
2773 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2774 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2775 +#endif
2776 +
2777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2778 mm->unmap_area = arch_unmap_area_topdown;
2779 }
2780 diff -urNp linux-2.6.39.4/arch/score/include/asm/system.h linux-2.6.39.4/arch/score/include/asm/system.h
2781 --- linux-2.6.39.4/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2782 +++ linux-2.6.39.4/arch/score/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2783 @@ -17,7 +17,7 @@ do { \
2784 #define finish_arch_switch(prev) do {} while (0)
2785
2786 typedef void (*vi_handler_t)(void);
2787 -extern unsigned long arch_align_stack(unsigned long sp);
2788 +#define arch_align_stack(x) (x)
2789
2790 #define mb() barrier()
2791 #define rmb() barrier()
2792 diff -urNp linux-2.6.39.4/arch/score/kernel/process.c linux-2.6.39.4/arch/score/kernel/process.c
2793 --- linux-2.6.39.4/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2794 +++ linux-2.6.39.4/arch/score/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2795 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2796
2797 return task_pt_regs(task)->cp0_epc;
2798 }
2799 -
2800 -unsigned long arch_align_stack(unsigned long sp)
2801 -{
2802 - return sp;
2803 -}
2804 diff -urNp linux-2.6.39.4/arch/sh/mm/mmap.c linux-2.6.39.4/arch/sh/mm/mmap.c
2805 --- linux-2.6.39.4/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2806 +++ linux-2.6.39.4/arch/sh/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2807 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2808 addr = PAGE_ALIGN(addr);
2809
2810 vma = find_vma(mm, addr);
2811 - if (TASK_SIZE - len >= addr &&
2812 - (!vma || addr + len <= vma->vm_start))
2813 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2814 return addr;
2815 }
2816
2817 @@ -106,7 +105,7 @@ full_search:
2818 }
2819 return -ENOMEM;
2820 }
2821 - if (likely(!vma || addr + len <= vma->vm_start)) {
2822 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2823 /*
2824 * Remember the place where we stopped the search:
2825 */
2826 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2827 addr = PAGE_ALIGN(addr);
2828
2829 vma = find_vma(mm, addr);
2830 - if (TASK_SIZE - len >= addr &&
2831 - (!vma || addr + len <= vma->vm_start))
2832 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2833 return addr;
2834 }
2835
2836 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2837 /* make sure it can fit in the remaining address space */
2838 if (likely(addr > len)) {
2839 vma = find_vma(mm, addr-len);
2840 - if (!vma || addr <= vma->vm_start) {
2841 + if (check_heap_stack_gap(vma, addr - len, len)) {
2842 /* remember the address as a hint for next time */
2843 return (mm->free_area_cache = addr-len);
2844 }
2845 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2846 if (unlikely(mm->mmap_base < len))
2847 goto bottomup;
2848
2849 - addr = mm->mmap_base-len;
2850 - if (do_colour_align)
2851 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2852 + addr = mm->mmap_base - len;
2853
2854 do {
2855 + if (do_colour_align)
2856 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2857 /*
2858 * Lookup failure means no vma is above this address,
2859 * else if new region fits below vma->vm_start,
2860 * return with success:
2861 */
2862 vma = find_vma(mm, addr);
2863 - if (likely(!vma || addr+len <= vma->vm_start)) {
2864 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2865 /* remember the address as a hint for next time */
2866 return (mm->free_area_cache = addr);
2867 }
2868 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2869 mm->cached_hole_size = vma->vm_start - addr;
2870
2871 /* try just below the current vma->vm_start */
2872 - addr = vma->vm_start-len;
2873 - if (do_colour_align)
2874 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2875 - } while (likely(len < vma->vm_start));
2876 + addr = skip_heap_stack_gap(vma, len);
2877 + } while (!IS_ERR_VALUE(addr));
2878
2879 bottomup:
2880 /*
2881 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h
2882 --- linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400
2883 +++ linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-08-05 20:34:06.000000000 -0400
2884 @@ -14,18 +14,40 @@
2885 #define ATOMIC64_INIT(i) { (i) }
2886
2887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2888 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2889 +{
2890 + return v->counter;
2891 +}
2892 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2893 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2894 +{
2895 + return v->counter;
2896 +}
2897
2898 #define atomic_set(v, i) (((v)->counter) = i)
2899 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2900 +{
2901 + v->counter = i;
2902 +}
2903 #define atomic64_set(v, i) (((v)->counter) = i)
2904 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2905 +{
2906 + v->counter = i;
2907 +}
2908
2909 extern void atomic_add(int, atomic_t *);
2910 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2911 extern void atomic64_add(long, atomic64_t *);
2912 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2913 extern void atomic_sub(int, atomic_t *);
2914 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2915 extern void atomic64_sub(long, atomic64_t *);
2916 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2917
2918 extern int atomic_add_ret(int, atomic_t *);
2919 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2920 extern long atomic64_add_ret(long, atomic64_t *);
2921 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2922 extern int atomic_sub_ret(int, atomic_t *);
2923 extern long atomic64_sub_ret(long, atomic64_t *);
2924
2925 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2926 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2927
2928 #define atomic_inc_return(v) atomic_add_ret(1, v)
2929 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2930 +{
2931 + return atomic_add_ret_unchecked(1, v);
2932 +}
2933 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2934 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2935 +{
2936 + return atomic64_add_ret_unchecked(1, v);
2937 +}
2938
2939 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2940 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2941
2942 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2943 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2944 +{
2945 + return atomic_add_ret_unchecked(i, v);
2946 +}
2947 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2948 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2949 +{
2950 + return atomic64_add_ret_unchecked(i, v);
2951 +}
2952
2953 /*
2954 * atomic_inc_and_test - increment and test
2955 @@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
2956 * other cases.
2957 */
2958 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2959 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
2960 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2961
2962 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2963 @@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
2964 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2965
2966 #define atomic_inc(v) atomic_add(1, v)
2967 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2968 +{
2969 + atomic_add_unchecked(1, v);
2970 +}
2971 #define atomic64_inc(v) atomic64_add(1, v)
2972 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2973 +{
2974 + atomic64_add_unchecked(1, v);
2975 +}
2976
2977 #define atomic_dec(v) atomic_sub(1, v)
2978 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2979 +{
2980 + atomic_sub_unchecked(1, v);
2981 +}
2982 #define atomic64_dec(v) atomic64_sub(1, v)
2983 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2984 +{
2985 + atomic64_sub_unchecked(1, v);
2986 +}
2987
2988 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2989 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2990
2991 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2992 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2993 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
2994 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
2995
2996 static inline int atomic_add_unless(atomic_t *v, int a, int u)
2997 {
2998 - int c, old;
2999 + int c, old, new;
3000 c = atomic_read(v);
3001 for (;;) {
3002 - if (unlikely(c == (u)))
3003 + if (unlikely(c == u))
3004 break;
3005 - old = atomic_cmpxchg((v), c, c + (a));
3006 +
3007 + asm volatile("addcc %2, %0, %0\n"
3008 +
3009 +#ifdef CONFIG_PAX_REFCOUNT
3010 + "tvs %%icc, 6\n"
3011 +#endif
3012 +
3013 + : "=r" (new)
3014 + : "0" (c), "ir" (a)
3015 + : "cc");
3016 +
3017 + old = atomic_cmpxchg(v, c, new);
3018 if (likely(old == c))
3019 break;
3020 c = old;
3021 }
3022 - return c != (u);
3023 + return c != u;
3024 }
3025
3026 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3027 @@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3028
3029 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3030 {
3031 - long c, old;
3032 + long c, old, new;
3033 c = atomic64_read(v);
3034 for (;;) {
3035 - if (unlikely(c == (u)))
3036 + if (unlikely(c == u))
3037 break;
3038 - old = atomic64_cmpxchg((v), c, c + (a));
3039 +
3040 + asm volatile("addcc %2, %0, %0\n"
3041 +
3042 +#ifdef CONFIG_PAX_REFCOUNT
3043 + "tvs %%xcc, 6\n"
3044 +#endif
3045 +
3046 + : "=r" (new)
3047 + : "0" (c), "ir" (a)
3048 + : "cc");
3049 +
3050 + old = atomic64_cmpxchg(v, c, new);
3051 if (likely(old == c))
3052 break;
3053 c = old;
3054 }
3055 - return c != (u);
3056 + return c != u;
3057 }
3058
3059 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3060 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/cache.h linux-2.6.39.4/arch/sparc/include/asm/cache.h
3061 --- linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
3062 +++ linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
3063 @@ -10,7 +10,7 @@
3064 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3065
3066 #define L1_CACHE_SHIFT 5
3067 -#define L1_CACHE_BYTES 32
3068 +#define L1_CACHE_BYTES 32UL
3069
3070 #ifdef CONFIG_SPARC32
3071 #define SMP_CACHE_BYTES_SHIFT 5
3072 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_32.h linux-2.6.39.4/arch/sparc/include/asm/elf_32.h
3073 --- linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400
3074 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-08-05 19:44:33.000000000 -0400
3075 @@ -114,6 +114,13 @@ typedef struct {
3076
3077 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3078
3079 +#ifdef CONFIG_PAX_ASLR
3080 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3081 +
3082 +#define PAX_DELTA_MMAP_LEN 16
3083 +#define PAX_DELTA_STACK_LEN 16
3084 +#endif
3085 +
3086 /* This yields a mask that user programs can use to figure out what
3087 instruction set this cpu supports. This can NOT be done in userspace
3088 on Sparc. */
3089 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_64.h linux-2.6.39.4/arch/sparc/include/asm/elf_64.h
3090 --- linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400
3091 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-08-05 19:44:33.000000000 -0400
3092 @@ -162,6 +162,12 @@ typedef struct {
3093 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3094 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3095
3096 +#ifdef CONFIG_PAX_ASLR
3097 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3098 +
3099 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3100 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3101 +#endif
3102
3103 /* This yields a mask that user programs can use to figure out what
3104 instruction set this cpu supports. */
3105 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h
3106 --- linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
3107 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
3108 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3109 BTFIXUPDEF_INT(page_none)
3110 BTFIXUPDEF_INT(page_copy)
3111 BTFIXUPDEF_INT(page_readonly)
3112 +
3113 +#ifdef CONFIG_PAX_PAGEEXEC
3114 +BTFIXUPDEF_INT(page_shared_noexec)
3115 +BTFIXUPDEF_INT(page_copy_noexec)
3116 +BTFIXUPDEF_INT(page_readonly_noexec)
3117 +#endif
3118 +
3119 BTFIXUPDEF_INT(page_kernel)
3120
3121 #define PMD_SHIFT SUN4C_PMD_SHIFT
3122 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3123 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3124 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3125
3126 +#ifdef CONFIG_PAX_PAGEEXEC
3127 +extern pgprot_t PAGE_SHARED_NOEXEC;
3128 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3129 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3130 +#else
3131 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3132 +# define PAGE_COPY_NOEXEC PAGE_COPY
3133 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3134 +#endif
3135 +
3136 extern unsigned long page_kernel;
3137
3138 #ifdef MODULE
3139 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h
3140 --- linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400
3141 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-05 19:44:33.000000000 -0400
3142 @@ -115,6 +115,13 @@
3143 SRMMU_EXEC | SRMMU_REF)
3144 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3145 SRMMU_EXEC | SRMMU_REF)
3146 +
3147 +#ifdef CONFIG_PAX_PAGEEXEC
3148 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3149 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3150 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3151 +#endif
3152 +
3153 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3154 SRMMU_DIRTY | SRMMU_REF)
3155
3156 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h
3157 --- linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400
3158 +++ linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-08-05 19:44:33.000000000 -0400
3159 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3160
3161 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3162
3163 -static void inline arch_read_lock(arch_rwlock_t *lock)
3164 +static inline void arch_read_lock(arch_rwlock_t *lock)
3165 {
3166 unsigned long tmp1, tmp2;
3167
3168 __asm__ __volatile__ (
3169 "1: ldsw [%2], %0\n"
3170 " brlz,pn %0, 2f\n"
3171 -"4: add %0, 1, %1\n"
3172 +"4: addcc %0, 1, %1\n"
3173 +
3174 +#ifdef CONFIG_PAX_REFCOUNT
3175 +" tvs %%icc, 6\n"
3176 +#endif
3177 +
3178 " cas [%2], %0, %1\n"
3179 " cmp %0, %1\n"
3180 " bne,pn %%icc, 1b\n"
3181 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3182 " .previous"
3183 : "=&r" (tmp1), "=&r" (tmp2)
3184 : "r" (lock)
3185 - : "memory");
3186 + : "memory", "cc");
3187 }
3188
3189 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3190 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3191 {
3192 int tmp1, tmp2;
3193
3194 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3195 "1: ldsw [%2], %0\n"
3196 " brlz,a,pn %0, 2f\n"
3197 " mov 0, %0\n"
3198 -" add %0, 1, %1\n"
3199 +" addcc %0, 1, %1\n"
3200 +
3201 +#ifdef CONFIG_PAX_REFCOUNT
3202 +" tvs %%icc, 6\n"
3203 +#endif
3204 +
3205 " cas [%2], %0, %1\n"
3206 " cmp %0, %1\n"
3207 " bne,pn %%icc, 1b\n"
3208 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3209 return tmp1;
3210 }
3211
3212 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3213 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3214 {
3215 unsigned long tmp1, tmp2;
3216
3217 __asm__ __volatile__(
3218 "1: lduw [%2], %0\n"
3219 -" sub %0, 1, %1\n"
3220 +" subcc %0, 1, %1\n"
3221 +
3222 +#ifdef CONFIG_PAX_REFCOUNT
3223 +" tvs %%icc, 6\n"
3224 +#endif
3225 +
3226 " cas [%2], %0, %1\n"
3227 " cmp %0, %1\n"
3228 " bne,pn %%xcc, 1b\n"
3229 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3230 : "memory");
3231 }
3232
3233 -static void inline arch_write_lock(arch_rwlock_t *lock)
3234 +static inline void arch_write_lock(arch_rwlock_t *lock)
3235 {
3236 unsigned long mask, tmp1, tmp2;
3237
3238 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3239 : "memory");
3240 }
3241
3242 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3243 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3244 {
3245 __asm__ __volatile__(
3246 " stw %%g0, [%0]"
3247 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3248 : "memory");
3249 }
3250
3251 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3252 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3253 {
3254 unsigned long mask, tmp1, tmp2, result;
3255
3256 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h
3257 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400
3258 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-08-05 19:44:33.000000000 -0400
3259 @@ -50,6 +50,8 @@ struct thread_info {
3260 unsigned long w_saved;
3261
3262 struct restart_block restart_block;
3263 +
3264 + unsigned long lowest_stack;
3265 };
3266
3267 /*
3268 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h
3269 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400
3270 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-08-05 19:44:33.000000000 -0400
3271 @@ -63,6 +63,8 @@ struct thread_info {
3272 struct pt_regs *kern_una_regs;
3273 unsigned int kern_una_insn;
3274
3275 + unsigned long lowest_stack;
3276 +
3277 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3278 };
3279
3280 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h
3281 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
3282 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
3283 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3284
3285 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3286 {
3287 - if (n && __access_ok((unsigned long) to, n))
3288 + if ((long)n < 0)
3289 + return n;
3290 +
3291 + if (n && __access_ok((unsigned long) to, n)) {
3292 + if (!__builtin_constant_p(n))
3293 + check_object_size(from, n, true);
3294 return __copy_user(to, (__force void __user *) from, n);
3295 - else
3296 + } else
3297 return n;
3298 }
3299
3300 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3301 {
3302 + if ((long)n < 0)
3303 + return n;
3304 +
3305 + if (!__builtin_constant_p(n))
3306 + check_object_size(from, n, true);
3307 +
3308 return __copy_user(to, (__force void __user *) from, n);
3309 }
3310
3311 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3312 {
3313 - if (n && __access_ok((unsigned long) from, n))
3314 + if ((long)n < 0)
3315 + return n;
3316 +
3317 + if (n && __access_ok((unsigned long) from, n)) {
3318 + if (!__builtin_constant_p(n))
3319 + check_object_size(to, n, false);
3320 return __copy_user((__force void __user *) to, from, n);
3321 - else
3322 + } else
3323 return n;
3324 }
3325
3326 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3327 {
3328 + if ((long)n < 0)
3329 + return n;
3330 +
3331 return __copy_user((__force void __user *) to, from, n);
3332 }
3333
3334 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h
3335 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
3336 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
3337 @@ -10,6 +10,7 @@
3338 #include <linux/compiler.h>
3339 #include <linux/string.h>
3340 #include <linux/thread_info.h>
3341 +#include <linux/kernel.h>
3342 #include <asm/asi.h>
3343 #include <asm/system.h>
3344 #include <asm/spitfire.h>
3345 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3346 static inline unsigned long __must_check
3347 copy_from_user(void *to, const void __user *from, unsigned long size)
3348 {
3349 - unsigned long ret = ___copy_from_user(to, from, size);
3350 + unsigned long ret;
3351
3352 + if ((long)size < 0 || size > INT_MAX)
3353 + return size;
3354 +
3355 + if (!__builtin_constant_p(size))
3356 + check_object_size(to, size, false);
3357 +
3358 + ret = ___copy_from_user(to, from, size);
3359 if (unlikely(ret))
3360 ret = copy_from_user_fixup(to, from, size);
3361
3362 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3363 static inline unsigned long __must_check
3364 copy_to_user(void __user *to, const void *from, unsigned long size)
3365 {
3366 - unsigned long ret = ___copy_to_user(to, from, size);
3367 + unsigned long ret;
3368 +
3369 + if ((long)size < 0 || size > INT_MAX)
3370 + return size;
3371 +
3372 + if (!__builtin_constant_p(size))
3373 + check_object_size(from, size, true);
3374
3375 + ret = ___copy_to_user(to, from, size);
3376 if (unlikely(ret))
3377 ret = copy_to_user_fixup(to, from, size);
3378 return ret;
3379 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess.h linux-2.6.39.4/arch/sparc/include/asm/uaccess.h
3380 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
3381 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
3382 @@ -1,5 +1,13 @@
3383 #ifndef ___ASM_SPARC_UACCESS_H
3384 #define ___ASM_SPARC_UACCESS_H
3385 +
3386 +#ifdef __KERNEL__
3387 +#ifndef __ASSEMBLY__
3388 +#include <linux/types.h>
3389 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3390 +#endif
3391 +#endif
3392 +
3393 #if defined(__sparc__) && defined(__arch64__)
3394 #include <asm/uaccess_64.h>
3395 #else
3396 diff -urNp linux-2.6.39.4/arch/sparc/kernel/Makefile linux-2.6.39.4/arch/sparc/kernel/Makefile
3397 --- linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400
3398 +++ linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-08-05 19:44:33.000000000 -0400
3399 @@ -3,7 +3,7 @@
3400 #
3401
3402 asflags-y := -ansi
3403 -ccflags-y := -Werror
3404 +#ccflags-y := -Werror
3405
3406 extra-y := head_$(BITS).o
3407 extra-y += init_task.o
3408 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_32.c linux-2.6.39.4/arch/sparc/kernel/process_32.c
3409 --- linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400
3410 +++ linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-08-05 19:44:33.000000000 -0400
3411 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
3412 rw->ins[4], rw->ins[5],
3413 rw->ins[6],
3414 rw->ins[7]);
3415 - printk("%pS\n", (void *) rw->ins[7]);
3416 + printk("%pA\n", (void *) rw->ins[7]);
3417 rw = (struct reg_window32 *) rw->ins[6];
3418 }
3419 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3420 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
3421
3422 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3423 r->psr, r->pc, r->npc, r->y, print_tainted());
3424 - printk("PC: <%pS>\n", (void *) r->pc);
3425 + printk("PC: <%pA>\n", (void *) r->pc);
3426 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3427 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3428 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3429 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3430 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3431 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3432 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3433 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3434
3435 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3436 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3437 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
3438 rw = (struct reg_window32 *) fp;
3439 pc = rw->ins[7];
3440 printk("[%08lx : ", pc);
3441 - printk("%pS ] ", (void *) pc);
3442 + printk("%pA ] ", (void *) pc);
3443 fp = rw->ins[6];
3444 } while (++count < 16);
3445 printk("\n");
3446 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_64.c linux-2.6.39.4/arch/sparc/kernel/process_64.c
3447 --- linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400
3448 +++ linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-08-05 19:44:33.000000000 -0400
3449 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3450 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3451 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3452 if (regs->tstate & TSTATE_PRIV)
3453 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3454 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3455 }
3456
3457 void show_regs(struct pt_regs *regs)
3458 {
3459 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3460 regs->tpc, regs->tnpc, regs->y, print_tainted());
3461 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3462 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3463 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3464 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3465 regs->u_regs[3]);
3466 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3467 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3468 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3469 regs->u_regs[15]);
3470 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3471 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3472 show_regwindow(regs);
3473 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3474 }
3475 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3476 ((tp && tp->task) ? tp->task->pid : -1));
3477
3478 if (gp->tstate & TSTATE_PRIV) {
3479 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3480 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3481 (void *) gp->tpc,
3482 (void *) gp->o7,
3483 (void *) gp->i7,
3484 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c
3485 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400
3486 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-05 19:44:33.000000000 -0400
3487 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3488 if (ARCH_SUN4C && len > 0x20000000)
3489 return -ENOMEM;
3490 if (!addr)
3491 - addr = TASK_UNMAPPED_BASE;
3492 + addr = current->mm->mmap_base;
3493
3494 if (flags & MAP_SHARED)
3495 addr = COLOUR_ALIGN(addr);
3496 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3497 }
3498 if (TASK_SIZE - PAGE_SIZE - len < addr)
3499 return -ENOMEM;
3500 - if (!vmm || addr + len <= vmm->vm_start)
3501 + if (check_heap_stack_gap(vmm, addr, len))
3502 return addr;
3503 addr = vmm->vm_end;
3504 if (flags & MAP_SHARED)
3505 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c
3506 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400
3507 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-05 19:44:33.000000000 -0400
3508 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3509 /* We do not accept a shared mapping if it would violate
3510 * cache aliasing constraints.
3511 */
3512 - if ((flags & MAP_SHARED) &&
3513 + if ((filp || (flags & MAP_SHARED)) &&
3514 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3515 return -EINVAL;
3516 return addr;
3517 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3518 if (filp || (flags & MAP_SHARED))
3519 do_color_align = 1;
3520
3521 +#ifdef CONFIG_PAX_RANDMMAP
3522 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3523 +#endif
3524 +
3525 if (addr) {
3526 if (do_color_align)
3527 addr = COLOUR_ALIGN(addr, pgoff);
3528 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3529 addr = PAGE_ALIGN(addr);
3530
3531 vma = find_vma(mm, addr);
3532 - if (task_size - len >= addr &&
3533 - (!vma || addr + len <= vma->vm_start))
3534 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3535 return addr;
3536 }
3537
3538 if (len > mm->cached_hole_size) {
3539 - start_addr = addr = mm->free_area_cache;
3540 + start_addr = addr = mm->free_area_cache;
3541 } else {
3542 - start_addr = addr = TASK_UNMAPPED_BASE;
3543 + start_addr = addr = mm->mmap_base;
3544 mm->cached_hole_size = 0;
3545 }
3546
3547 @@ -174,14 +177,14 @@ full_search:
3548 vma = find_vma(mm, VA_EXCLUDE_END);
3549 }
3550 if (unlikely(task_size < addr)) {
3551 - if (start_addr != TASK_UNMAPPED_BASE) {
3552 - start_addr = addr = TASK_UNMAPPED_BASE;
3553 + if (start_addr != mm->mmap_base) {
3554 + start_addr = addr = mm->mmap_base;
3555 mm->cached_hole_size = 0;
3556 goto full_search;
3557 }
3558 return -ENOMEM;
3559 }
3560 - if (likely(!vma || addr + len <= vma->vm_start)) {
3561 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3562 /*
3563 * Remember the place where we stopped the search:
3564 */
3565 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3566 /* We do not accept a shared mapping if it would violate
3567 * cache aliasing constraints.
3568 */
3569 - if ((flags & MAP_SHARED) &&
3570 + if ((filp || (flags & MAP_SHARED)) &&
3571 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3572 return -EINVAL;
3573 return addr;
3574 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3575 addr = PAGE_ALIGN(addr);
3576
3577 vma = find_vma(mm, addr);
3578 - if (task_size - len >= addr &&
3579 - (!vma || addr + len <= vma->vm_start))
3580 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3581 return addr;
3582 }
3583
3584 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3585 /* make sure it can fit in the remaining address space */
3586 if (likely(addr > len)) {
3587 vma = find_vma(mm, addr-len);
3588 - if (!vma || addr <= vma->vm_start) {
3589 + if (check_heap_stack_gap(vma, addr - len, len)) {
3590 /* remember the address as a hint for next time */
3591 return (mm->free_area_cache = addr-len);
3592 }
3593 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3594 if (unlikely(mm->mmap_base < len))
3595 goto bottomup;
3596
3597 - addr = mm->mmap_base-len;
3598 - if (do_color_align)
3599 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3600 + addr = mm->mmap_base - len;
3601
3602 do {
3603 + if (do_color_align)
3604 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3605 /*
3606 * Lookup failure means no vma is above this address,
3607 * else if new region fits below vma->vm_start,
3608 * return with success:
3609 */
3610 vma = find_vma(mm, addr);
3611 - if (likely(!vma || addr+len <= vma->vm_start)) {
3612 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3613 /* remember the address as a hint for next time */
3614 return (mm->free_area_cache = addr);
3615 }
3616 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3617 mm->cached_hole_size = vma->vm_start - addr;
3618
3619 /* try just below the current vma->vm_start */
3620 - addr = vma->vm_start-len;
3621 - if (do_color_align)
3622 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3623 - } while (likely(len < vma->vm_start));
3624 + addr = skip_heap_stack_gap(vma, len);
3625 + } while (!IS_ERR_VALUE(addr));
3626
3627 bottomup:
3628 /*
3629 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3630 gap == RLIM_INFINITY ||
3631 sysctl_legacy_va_layout) {
3632 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3633 +
3634 +#ifdef CONFIG_PAX_RANDMMAP
3635 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3636 + mm->mmap_base += mm->delta_mmap;
3637 +#endif
3638 +
3639 mm->get_unmapped_area = arch_get_unmapped_area;
3640 mm->unmap_area = arch_unmap_area;
3641 } else {
3642 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3643 gap = (task_size / 6 * 5);
3644
3645 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3646 +
3647 +#ifdef CONFIG_PAX_RANDMMAP
3648 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3649 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3650 +#endif
3651 +
3652 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3653 mm->unmap_area = arch_unmap_area_topdown;
3654 }
3655 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_32.c linux-2.6.39.4/arch/sparc/kernel/traps_32.c
3656 --- linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400
3657 +++ linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-08-05 19:44:33.000000000 -0400
3658 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3659 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3660 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3661
3662 +extern void gr_handle_kernel_exploit(void);
3663 +
3664 void die_if_kernel(char *str, struct pt_regs *regs)
3665 {
3666 static int die_counter;
3667 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3668 count++ < 30 &&
3669 (((unsigned long) rw) >= PAGE_OFFSET) &&
3670 !(((unsigned long) rw) & 0x7)) {
3671 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3672 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3673 (void *) rw->ins[7]);
3674 rw = (struct reg_window32 *)rw->ins[6];
3675 }
3676 }
3677 printk("Instruction DUMP:");
3678 instruction_dump ((unsigned long *) regs->pc);
3679 - if(regs->psr & PSR_PS)
3680 + if(regs->psr & PSR_PS) {
3681 + gr_handle_kernel_exploit();
3682 do_exit(SIGKILL);
3683 + }
3684 do_exit(SIGSEGV);
3685 }
3686
3687 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_64.c linux-2.6.39.4/arch/sparc/kernel/traps_64.c
3688 --- linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400
3689 +++ linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-08-05 19:44:33.000000000 -0400
3690 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3691 i + 1,
3692 p->trapstack[i].tstate, p->trapstack[i].tpc,
3693 p->trapstack[i].tnpc, p->trapstack[i].tt);
3694 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3695 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3696 }
3697 }
3698
3699 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3700
3701 lvl -= 0x100;
3702 if (regs->tstate & TSTATE_PRIV) {
3703 +
3704 +#ifdef CONFIG_PAX_REFCOUNT
3705 + if (lvl == 6)
3706 + pax_report_refcount_overflow(regs);
3707 +#endif
3708 +
3709 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3710 die_if_kernel(buffer, regs);
3711 }
3712 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3713 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3714 {
3715 char buffer[32];
3716 -
3717 +
3718 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3719 0, lvl, SIGTRAP) == NOTIFY_STOP)
3720 return;
3721
3722 +#ifdef CONFIG_PAX_REFCOUNT
3723 + if (lvl == 6)
3724 + pax_report_refcount_overflow(regs);
3725 +#endif
3726 +
3727 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3728
3729 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3730 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3731 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3732 printk("%s" "ERROR(%d): ",
3733 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3734 - printk("TPC<%pS>\n", (void *) regs->tpc);
3735 + printk("TPC<%pA>\n", (void *) regs->tpc);
3736 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3737 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3738 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3739 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3740 smp_processor_id(),
3741 (type & 0x1) ? 'I' : 'D',
3742 regs->tpc);
3743 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3744 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3745 panic("Irrecoverable Cheetah+ parity error.");
3746 }
3747
3748 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3749 smp_processor_id(),
3750 (type & 0x1) ? 'I' : 'D',
3751 regs->tpc);
3752 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3753 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3754 }
3755
3756 struct sun4v_error_entry {
3757 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3758
3759 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3760 regs->tpc, tl);
3761 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3762 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3763 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3764 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3765 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3766 (void *) regs->u_regs[UREG_I7]);
3767 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3768 "pte[%lx] error[%lx]\n",
3769 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3770
3771 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3772 regs->tpc, tl);
3773 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3774 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3775 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3776 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3777 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3778 (void *) regs->u_regs[UREG_I7]);
3779 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3780 "pte[%lx] error[%lx]\n",
3781 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3782 fp = (unsigned long)sf->fp + STACK_BIAS;
3783 }
3784
3785 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3786 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3787 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3788 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3789 int index = tsk->curr_ret_stack;
3790 if (tsk->ret_stack && index >= graph) {
3791 pc = tsk->ret_stack[index - graph].ret;
3792 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3793 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3794 graph++;
3795 }
3796 }
3797 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3798 return (struct reg_window *) (fp + STACK_BIAS);
3799 }
3800
3801 +extern void gr_handle_kernel_exploit(void);
3802 +
3803 void die_if_kernel(char *str, struct pt_regs *regs)
3804 {
3805 static int die_counter;
3806 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3807 while (rw &&
3808 count++ < 30 &&
3809 kstack_valid(tp, (unsigned long) rw)) {
3810 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3811 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3812 (void *) rw->ins[7]);
3813
3814 rw = kernel_stack_up(rw);
3815 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3816 }
3817 user_instruction_dump ((unsigned int __user *) regs->tpc);
3818 }
3819 - if (regs->tstate & TSTATE_PRIV)
3820 + if (regs->tstate & TSTATE_PRIV) {
3821 + gr_handle_kernel_exploit();
3822 do_exit(SIGKILL);
3823 + }
3824 do_exit(SIGSEGV);
3825 }
3826 EXPORT_SYMBOL(die_if_kernel);
3827 diff -urNp linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c
3828 --- linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400
3829 +++ linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-08-05 19:44:33.000000000 -0400
3830 @@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
3831 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3832
3833 if (__ratelimit(&ratelimit)) {
3834 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3835 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3836 regs->tpc, (void *) regs->tpc);
3837 }
3838 }
3839 diff -urNp linux-2.6.39.4/arch/sparc/lib/atomic_64.S linux-2.6.39.4/arch/sparc/lib/atomic_64.S
3840 --- linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400
3841 +++ linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-08-05 19:44:33.000000000 -0400
3842 @@ -18,7 +18,12 @@
3843 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3844 BACKOFF_SETUP(%o2)
3845 1: lduw [%o1], %g1
3846 - add %g1, %o0, %g7
3847 + addcc %g1, %o0, %g7
3848 +
3849 +#ifdef CONFIG_PAX_REFCOUNT
3850 + tvs %icc, 6
3851 +#endif
3852 +
3853 cas [%o1], %g1, %g7
3854 cmp %g1, %g7
3855 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3856 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3857 2: BACKOFF_SPIN(%o2, %o3, 1b)
3858 .size atomic_add, .-atomic_add
3859
3860 + .globl atomic_add_unchecked
3861 + .type atomic_add_unchecked,#function
3862 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3863 + BACKOFF_SETUP(%o2)
3864 +1: lduw [%o1], %g1
3865 + add %g1, %o0, %g7
3866 + cas [%o1], %g1, %g7
3867 + cmp %g1, %g7
3868 + bne,pn %icc, 2f
3869 + nop
3870 + retl
3871 + nop
3872 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3873 + .size atomic_add_unchecked, .-atomic_add_unchecked
3874 +
3875 .globl atomic_sub
3876 .type atomic_sub,#function
3877 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3878 BACKOFF_SETUP(%o2)
3879 1: lduw [%o1], %g1
3880 - sub %g1, %o0, %g7
3881 + subcc %g1, %o0, %g7
3882 +
3883 +#ifdef CONFIG_PAX_REFCOUNT
3884 + tvs %icc, 6
3885 +#endif
3886 +
3887 cas [%o1], %g1, %g7
3888 cmp %g1, %g7
3889 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3890 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3891 2: BACKOFF_SPIN(%o2, %o3, 1b)
3892 .size atomic_sub, .-atomic_sub
3893
3894 + .globl atomic_sub_unchecked
3895 + .type atomic_sub_unchecked,#function
3896 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3897 + BACKOFF_SETUP(%o2)
3898 +1: lduw [%o1], %g1
3899 + sub %g1, %o0, %g7
3900 + cas [%o1], %g1, %g7
3901 + cmp %g1, %g7
3902 + bne,pn %icc, 2f
3903 + nop
3904 + retl
3905 + nop
3906 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3907 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3908 +
3909 .globl atomic_add_ret
3910 .type atomic_add_ret,#function
3911 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3912 BACKOFF_SETUP(%o2)
3913 1: lduw [%o1], %g1
3914 - add %g1, %o0, %g7
3915 + addcc %g1, %o0, %g7
3916 +
3917 +#ifdef CONFIG_PAX_REFCOUNT
3918 + tvs %icc, 6
3919 +#endif
3920 +
3921 cas [%o1], %g1, %g7
3922 cmp %g1, %g7
3923 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3924 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3925 2: BACKOFF_SPIN(%o2, %o3, 1b)
3926 .size atomic_add_ret, .-atomic_add_ret
3927
3928 + .globl atomic_add_ret_unchecked
3929 + .type atomic_add_ret_unchecked,#function
3930 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3931 + BACKOFF_SETUP(%o2)
3932 +1: lduw [%o1], %g1
3933 + addcc %g1, %o0, %g7
3934 + cas [%o1], %g1, %g7
3935 + cmp %g1, %g7
3936 + bne,pn %icc, 2f
3937 + add %g7, %o0, %g7
3938 + sra %g7, 0, %o0
3939 + retl
3940 + nop
3941 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3942 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3943 +
3944 .globl atomic_sub_ret
3945 .type atomic_sub_ret,#function
3946 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3947 BACKOFF_SETUP(%o2)
3948 1: lduw [%o1], %g1
3949 - sub %g1, %o0, %g7
3950 + subcc %g1, %o0, %g7
3951 +
3952 +#ifdef CONFIG_PAX_REFCOUNT
3953 + tvs %icc, 6
3954 +#endif
3955 +
3956 cas [%o1], %g1, %g7
3957 cmp %g1, %g7
3958 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3959 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3960 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3961 BACKOFF_SETUP(%o2)
3962 1: ldx [%o1], %g1
3963 - add %g1, %o0, %g7
3964 + addcc %g1, %o0, %g7
3965 +
3966 +#ifdef CONFIG_PAX_REFCOUNT
3967 + tvs %xcc, 6
3968 +#endif
3969 +
3970 casx [%o1], %g1, %g7
3971 cmp %g1, %g7
3972 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3973 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3974 2: BACKOFF_SPIN(%o2, %o3, 1b)
3975 .size atomic64_add, .-atomic64_add
3976
3977 + .globl atomic64_add_unchecked
3978 + .type atomic64_add_unchecked,#function
3979 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3980 + BACKOFF_SETUP(%o2)
3981 +1: ldx [%o1], %g1
3982 + addcc %g1, %o0, %g7
3983 + casx [%o1], %g1, %g7
3984 + cmp %g1, %g7
3985 + bne,pn %xcc, 2f
3986 + nop
3987 + retl
3988 + nop
3989 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3990 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
3991 +
3992 .globl atomic64_sub
3993 .type atomic64_sub,#function
3994 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3995 BACKOFF_SETUP(%o2)
3996 1: ldx [%o1], %g1
3997 - sub %g1, %o0, %g7
3998 + subcc %g1, %o0, %g7
3999 +
4000 +#ifdef CONFIG_PAX_REFCOUNT
4001 + tvs %xcc, 6
4002 +#endif
4003 +
4004 casx [%o1], %g1, %g7
4005 cmp %g1, %g7
4006 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4007 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4008 2: BACKOFF_SPIN(%o2, %o3, 1b)
4009 .size atomic64_sub, .-atomic64_sub
4010
4011 + .globl atomic64_sub_unchecked
4012 + .type atomic64_sub_unchecked,#function
4013 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4014 + BACKOFF_SETUP(%o2)
4015 +1: ldx [%o1], %g1
4016 + subcc %g1, %o0, %g7
4017 + casx [%o1], %g1, %g7
4018 + cmp %g1, %g7
4019 + bne,pn %xcc, 2f
4020 + nop
4021 + retl
4022 + nop
4023 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4024 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4025 +
4026 .globl atomic64_add_ret
4027 .type atomic64_add_ret,#function
4028 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4029 BACKOFF_SETUP(%o2)
4030 1: ldx [%o1], %g1
4031 - add %g1, %o0, %g7
4032 + addcc %g1, %o0, %g7
4033 +
4034 +#ifdef CONFIG_PAX_REFCOUNT
4035 + tvs %xcc, 6
4036 +#endif
4037 +
4038 casx [%o1], %g1, %g7
4039 cmp %g1, %g7
4040 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4041 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4042 2: BACKOFF_SPIN(%o2, %o3, 1b)
4043 .size atomic64_add_ret, .-atomic64_add_ret
4044
4045 + .globl atomic64_add_ret_unchecked
4046 + .type atomic64_add_ret_unchecked,#function
4047 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4048 + BACKOFF_SETUP(%o2)
4049 +1: ldx [%o1], %g1
4050 + addcc %g1, %o0, %g7
4051 + casx [%o1], %g1, %g7
4052 + cmp %g1, %g7
4053 + bne,pn %xcc, 2f
4054 + add %g7, %o0, %g7
4055 + mov %g7, %o0
4056 + retl
4057 + nop
4058 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4059 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4060 +
4061 .globl atomic64_sub_ret
4062 .type atomic64_sub_ret,#function
4063 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4064 BACKOFF_SETUP(%o2)
4065 1: ldx [%o1], %g1
4066 - sub %g1, %o0, %g7
4067 + subcc %g1, %o0, %g7
4068 +
4069 +#ifdef CONFIG_PAX_REFCOUNT
4070 + tvs %xcc, 6
4071 +#endif
4072 +
4073 casx [%o1], %g1, %g7
4074 cmp %g1, %g7
4075 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4076 diff -urNp linux-2.6.39.4/arch/sparc/lib/ksyms.c linux-2.6.39.4/arch/sparc/lib/ksyms.c
4077 --- linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400
4078 +++ linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-08-05 19:44:33.000000000 -0400
4079 @@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
4080
4081 /* Atomic counter implementation. */
4082 EXPORT_SYMBOL(atomic_add);
4083 +EXPORT_SYMBOL(atomic_add_unchecked);
4084 EXPORT_SYMBOL(atomic_add_ret);
4085 EXPORT_SYMBOL(atomic_sub);
4086 +EXPORT_SYMBOL(atomic_sub_unchecked);
4087 EXPORT_SYMBOL(atomic_sub_ret);
4088 EXPORT_SYMBOL(atomic64_add);
4089 +EXPORT_SYMBOL(atomic64_add_unchecked);
4090 EXPORT_SYMBOL(atomic64_add_ret);
4091 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4092 EXPORT_SYMBOL(atomic64_sub);
4093 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4094 EXPORT_SYMBOL(atomic64_sub_ret);
4095
4096 /* Atomic bit operations. */
4097 diff -urNp linux-2.6.39.4/arch/sparc/lib/Makefile linux-2.6.39.4/arch/sparc/lib/Makefile
4098 --- linux-2.6.39.4/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400
4099 +++ linux-2.6.39.4/arch/sparc/lib/Makefile 2011-08-05 19:44:33.000000000 -0400
4100 @@ -2,7 +2,7 @@
4101 #
4102
4103 asflags-y := -ansi -DST_DIV0=0x02
4104 -ccflags-y := -Werror
4105 +#ccflags-y := -Werror
4106
4107 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4108 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4109 diff -urNp linux-2.6.39.4/arch/sparc/Makefile linux-2.6.39.4/arch/sparc/Makefile
4110 --- linux-2.6.39.4/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400
4111 +++ linux-2.6.39.4/arch/sparc/Makefile 2011-08-05 19:44:33.000000000 -0400
4112 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4113 # Export what is needed by arch/sparc/boot/Makefile
4114 export VMLINUX_INIT VMLINUX_MAIN
4115 VMLINUX_INIT := $(head-y) $(init-y)
4116 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4117 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4118 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4119 VMLINUX_MAIN += $(drivers-y) $(net-y)
4120
4121 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_32.c linux-2.6.39.4/arch/sparc/mm/fault_32.c
4122 --- linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400
4123 +++ linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-08-05 19:44:33.000000000 -0400
4124 @@ -22,6 +22,9 @@
4125 #include <linux/interrupt.h>
4126 #include <linux/module.h>
4127 #include <linux/kdebug.h>
4128 +#include <linux/slab.h>
4129 +#include <linux/pagemap.h>
4130 +#include <linux/compiler.h>
4131
4132 #include <asm/system.h>
4133 #include <asm/page.h>
4134 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4135 return safe_compute_effective_address(regs, insn);
4136 }
4137
4138 +#ifdef CONFIG_PAX_PAGEEXEC
4139 +#ifdef CONFIG_PAX_DLRESOLVE
4140 +static void pax_emuplt_close(struct vm_area_struct *vma)
4141 +{
4142 + vma->vm_mm->call_dl_resolve = 0UL;
4143 +}
4144 +
4145 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4146 +{
4147 + unsigned int *kaddr;
4148 +
4149 + vmf->page = alloc_page(GFP_HIGHUSER);
4150 + if (!vmf->page)
4151 + return VM_FAULT_OOM;
4152 +
4153 + kaddr = kmap(vmf->page);
4154 + memset(kaddr, 0, PAGE_SIZE);
4155 + kaddr[0] = 0x9DE3BFA8U; /* save */
4156 + flush_dcache_page(vmf->page);
4157 + kunmap(vmf->page);
4158 + return VM_FAULT_MAJOR;
4159 +}
4160 +
4161 +static const struct vm_operations_struct pax_vm_ops = {
4162 + .close = pax_emuplt_close,
4163 + .fault = pax_emuplt_fault
4164 +};
4165 +
4166 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4167 +{
4168 + int ret;
4169 +
4170 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4171 + vma->vm_mm = current->mm;
4172 + vma->vm_start = addr;
4173 + vma->vm_end = addr + PAGE_SIZE;
4174 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4175 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4176 + vma->vm_ops = &pax_vm_ops;
4177 +
4178 + ret = insert_vm_struct(current->mm, vma);
4179 + if (ret)
4180 + return ret;
4181 +
4182 + ++current->mm->total_vm;
4183 + return 0;
4184 +}
4185 +#endif
4186 +
4187 +/*
4188 + * PaX: decide what to do with offenders (regs->pc = fault address)
4189 + *
4190 + * returns 1 when task should be killed
4191 + * 2 when patched PLT trampoline was detected
4192 + * 3 when unpatched PLT trampoline was detected
4193 + */
4194 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4195 +{
4196 +
4197 +#ifdef CONFIG_PAX_EMUPLT
4198 + int err;
4199 +
4200 + do { /* PaX: patched PLT emulation #1 */
4201 + unsigned int sethi1, sethi2, jmpl;
4202 +
4203 + err = get_user(sethi1, (unsigned int *)regs->pc);
4204 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4205 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4206 +
4207 + if (err)
4208 + break;
4209 +
4210 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4211 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4212 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4213 + {
4214 + unsigned int addr;
4215 +
4216 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4217 + addr = regs->u_regs[UREG_G1];
4218 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4219 + regs->pc = addr;
4220 + regs->npc = addr+4;
4221 + return 2;
4222 + }
4223 + } while (0);
4224 +
4225 + { /* PaX: patched PLT emulation #2 */
4226 + unsigned int ba;
4227 +
4228 + err = get_user(ba, (unsigned int *)regs->pc);
4229 +
4230 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4231 + unsigned int addr;
4232 +
4233 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4234 + regs->pc = addr;
4235 + regs->npc = addr+4;
4236 + return 2;
4237 + }
4238 + }
4239 +
4240 + do { /* PaX: patched PLT emulation #3 */
4241 + unsigned int sethi, jmpl, nop;
4242 +
4243 + err = get_user(sethi, (unsigned int *)regs->pc);
4244 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4245 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4246 +
4247 + if (err)
4248 + break;
4249 +
4250 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4251 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4252 + nop == 0x01000000U)
4253 + {
4254 + unsigned int addr;
4255 +
4256 + addr = (sethi & 0x003FFFFFU) << 10;
4257 + regs->u_regs[UREG_G1] = addr;
4258 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4259 + regs->pc = addr;
4260 + regs->npc = addr+4;
4261 + return 2;
4262 + }
4263 + } while (0);
4264 +
4265 + do { /* PaX: unpatched PLT emulation step 1 */
4266 + unsigned int sethi, ba, nop;
4267 +
4268 + err = get_user(sethi, (unsigned int *)regs->pc);
4269 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4270 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4271 +
4272 + if (err)
4273 + break;
4274 +
4275 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4276 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4277 + nop == 0x01000000U)
4278 + {
4279 + unsigned int addr, save, call;
4280 +
4281 + if ((ba & 0xFFC00000U) == 0x30800000U)
4282 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4283 + else
4284 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4285 +
4286 + err = get_user(save, (unsigned int *)addr);
4287 + err |= get_user(call, (unsigned int *)(addr+4));
4288 + err |= get_user(nop, (unsigned int *)(addr+8));
4289 + if (err)
4290 + break;
4291 +
4292 +#ifdef CONFIG_PAX_DLRESOLVE
4293 + if (save == 0x9DE3BFA8U &&
4294 + (call & 0xC0000000U) == 0x40000000U &&
4295 + nop == 0x01000000U)
4296 + {
4297 + struct vm_area_struct *vma;
4298 + unsigned long call_dl_resolve;
4299 +
4300 + down_read(&current->mm->mmap_sem);
4301 + call_dl_resolve = current->mm->call_dl_resolve;
4302 + up_read(&current->mm->mmap_sem);
4303 + if (likely(call_dl_resolve))
4304 + goto emulate;
4305 +
4306 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4307 +
4308 + down_write(&current->mm->mmap_sem);
4309 + if (current->mm->call_dl_resolve) {
4310 + call_dl_resolve = current->mm->call_dl_resolve;
4311 + up_write(&current->mm->mmap_sem);
4312 + if (vma)
4313 + kmem_cache_free(vm_area_cachep, vma);
4314 + goto emulate;
4315 + }
4316 +
4317 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4318 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4319 + up_write(&current->mm->mmap_sem);
4320 + if (vma)
4321 + kmem_cache_free(vm_area_cachep, vma);
4322 + return 1;
4323 + }
4324 +
4325 + if (pax_insert_vma(vma, call_dl_resolve)) {
4326 + up_write(&current->mm->mmap_sem);
4327 + kmem_cache_free(vm_area_cachep, vma);
4328 + return 1;
4329 + }
4330 +
4331 + current->mm->call_dl_resolve = call_dl_resolve;
4332 + up_write(&current->mm->mmap_sem);
4333 +
4334 +emulate:
4335 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4336 + regs->pc = call_dl_resolve;
4337 + regs->npc = addr+4;
4338 + return 3;
4339 + }
4340 +#endif
4341 +
4342 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4343 + if ((save & 0xFFC00000U) == 0x05000000U &&
4344 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4345 + nop == 0x01000000U)
4346 + {
4347 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4348 + regs->u_regs[UREG_G2] = addr + 4;
4349 + addr = (save & 0x003FFFFFU) << 10;
4350 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4351 + regs->pc = addr;
4352 + regs->npc = addr+4;
4353 + return 3;
4354 + }
4355 + }
4356 + } while (0);
4357 +
4358 + do { /* PaX: unpatched PLT emulation step 2 */
4359 + unsigned int save, call, nop;
4360 +
4361 + err = get_user(save, (unsigned int *)(regs->pc-4));
4362 + err |= get_user(call, (unsigned int *)regs->pc);
4363 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4364 + if (err)
4365 + break;
4366 +
4367 + if (save == 0x9DE3BFA8U &&
4368 + (call & 0xC0000000U) == 0x40000000U &&
4369 + nop == 0x01000000U)
4370 + {
4371 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4372 +
4373 + regs->u_regs[UREG_RETPC] = regs->pc;
4374 + regs->pc = dl_resolve;
4375 + regs->npc = dl_resolve+4;
4376 + return 3;
4377 + }
4378 + } while (0);
4379 +#endif
4380 +
4381 + return 1;
4382 +}
4383 +
4384 +void pax_report_insns(void *pc, void *sp)
4385 +{
4386 + unsigned long i;
4387 +
4388 + printk(KERN_ERR "PAX: bytes at PC: ");
4389 + for (i = 0; i < 8; i++) {
4390 + unsigned int c;
4391 + if (get_user(c, (unsigned int *)pc+i))
4392 + printk(KERN_CONT "???????? ");
4393 + else
4394 + printk(KERN_CONT "%08x ", c);
4395 + }
4396 + printk("\n");
4397 +}
4398 +#endif
4399 +
4400 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4401 int text_fault)
4402 {
4403 @@ -281,6 +546,24 @@ good_area:
4404 if(!(vma->vm_flags & VM_WRITE))
4405 goto bad_area;
4406 } else {
4407 +
4408 +#ifdef CONFIG_PAX_PAGEEXEC
4409 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4410 + up_read(&mm->mmap_sem);
4411 + switch (pax_handle_fetch_fault(regs)) {
4412 +
4413 +#ifdef CONFIG_PAX_EMUPLT
4414 + case 2:
4415 + case 3:
4416 + return;
4417 +#endif
4418 +
4419 + }
4420 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4421 + do_group_exit(SIGKILL);
4422 + }
4423 +#endif
4424 +
4425 /* Allow reads even for write-only mappings */
4426 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4427 goto bad_area;
4428 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_64.c linux-2.6.39.4/arch/sparc/mm/fault_64.c
4429 --- linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400
4430 +++ linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-08-05 19:44:33.000000000 -0400
4431 @@ -21,6 +21,9 @@
4432 #include <linux/kprobes.h>
4433 #include <linux/kdebug.h>
4434 #include <linux/percpu.h>
4435 +#include <linux/slab.h>
4436 +#include <linux/pagemap.h>
4437 +#include <linux/compiler.h>
4438
4439 #include <asm/page.h>
4440 #include <asm/pgtable.h>
4441 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4442 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4443 regs->tpc);
4444 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4445 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4446 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4447 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4448 dump_stack();
4449 unhandled_fault(regs->tpc, current, regs);
4450 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4451 show_regs(regs);
4452 }
4453
4454 +#ifdef CONFIG_PAX_PAGEEXEC
4455 +#ifdef CONFIG_PAX_DLRESOLVE
4456 +static void pax_emuplt_close(struct vm_area_struct *vma)
4457 +{
4458 + vma->vm_mm->call_dl_resolve = 0UL;
4459 +}
4460 +
4461 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4462 +{
4463 + unsigned int *kaddr;
4464 +
4465 + vmf->page = alloc_page(GFP_HIGHUSER);
4466 + if (!vmf->page)
4467 + return VM_FAULT_OOM;
4468 +
4469 + kaddr = kmap(vmf->page);
4470 + memset(kaddr, 0, PAGE_SIZE);
4471 + kaddr[0] = 0x9DE3BFA8U; /* save */
4472 + flush_dcache_page(vmf->page);
4473 + kunmap(vmf->page);
4474 + return VM_FAULT_MAJOR;
4475 +}
4476 +
4477 +static const struct vm_operations_struct pax_vm_ops = {
4478 + .close = pax_emuplt_close,
4479 + .fault = pax_emuplt_fault
4480 +};
4481 +
4482 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4483 +{
4484 + int ret;
4485 +
4486 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4487 + vma->vm_mm = current->mm;
4488 + vma->vm_start = addr;
4489 + vma->vm_end = addr + PAGE_SIZE;
4490 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4491 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4492 + vma->vm_ops = &pax_vm_ops;
4493 +
4494 + ret = insert_vm_struct(current->mm, vma);
4495 + if (ret)
4496 + return ret;
4497 +
4498 + ++current->mm->total_vm;
4499 + return 0;
4500 +}
4501 +#endif
4502 +
4503 +/*
4504 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4505 + *
4506 + * returns 1 when task should be killed
4507 + * 2 when patched PLT trampoline was detected
4508 + * 3 when unpatched PLT trampoline was detected
4509 + */
4510 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4511 +{
4512 +
4513 +#ifdef CONFIG_PAX_EMUPLT
4514 + int err;
4515 +
4516 + do { /* PaX: patched PLT emulation #1 */
4517 + unsigned int sethi1, sethi2, jmpl;
4518 +
4519 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4520 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4521 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4522 +
4523 + if (err)
4524 + break;
4525 +
4526 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4527 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4528 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4529 + {
4530 + unsigned long addr;
4531 +
4532 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4533 + addr = regs->u_regs[UREG_G1];
4534 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4535 +
4536 + if (test_thread_flag(TIF_32BIT))
4537 + addr &= 0xFFFFFFFFUL;
4538 +
4539 + regs->tpc = addr;
4540 + regs->tnpc = addr+4;
4541 + return 2;
4542 + }
4543 + } while (0);
4544 +
4545 + { /* PaX: patched PLT emulation #2 */
4546 + unsigned int ba;
4547 +
4548 + err = get_user(ba, (unsigned int *)regs->tpc);
4549 +
4550 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4551 + unsigned long addr;
4552 +
4553 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4554 +
4555 + if (test_thread_flag(TIF_32BIT))
4556 + addr &= 0xFFFFFFFFUL;
4557 +
4558 + regs->tpc = addr;
4559 + regs->tnpc = addr+4;
4560 + return 2;
4561 + }
4562 + }
4563 +
4564 + do { /* PaX: patched PLT emulation #3 */
4565 + unsigned int sethi, jmpl, nop;
4566 +
4567 + err = get_user(sethi, (unsigned int *)regs->tpc);
4568 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4569 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4570 +
4571 + if (err)
4572 + break;
4573 +
4574 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4575 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4576 + nop == 0x01000000U)
4577 + {
4578 + unsigned long addr;
4579 +
4580 + addr = (sethi & 0x003FFFFFU) << 10;
4581 + regs->u_regs[UREG_G1] = addr;
4582 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4583 +
4584 + if (test_thread_flag(TIF_32BIT))
4585 + addr &= 0xFFFFFFFFUL;
4586 +
4587 + regs->tpc = addr;
4588 + regs->tnpc = addr+4;
4589 + return 2;
4590 + }
4591 + } while (0);
4592 +
4593 + do { /* PaX: patched PLT emulation #4 */
4594 + unsigned int sethi, mov1, call, mov2;
4595 +
4596 + err = get_user(sethi, (unsigned int *)regs->tpc);
4597 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4598 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4599 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4600 +
4601 + if (err)
4602 + break;
4603 +
4604 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4605 + mov1 == 0x8210000FU &&
4606 + (call & 0xC0000000U) == 0x40000000U &&
4607 + mov2 == 0x9E100001U)
4608 + {
4609 + unsigned long addr;
4610 +
4611 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4612 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4613 +
4614 + if (test_thread_flag(TIF_32BIT))
4615 + addr &= 0xFFFFFFFFUL;
4616 +
4617 + regs->tpc = addr;
4618 + regs->tnpc = addr+4;
4619 + return 2;
4620 + }
4621 + } while (0);
4622 +
4623 + do { /* PaX: patched PLT emulation #5 */
4624 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4625 +
4626 + err = get_user(sethi, (unsigned int *)regs->tpc);
4627 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4628 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4629 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4630 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4631 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4632 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4633 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4634 +
4635 + if (err)
4636 + break;
4637 +
4638 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4639 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4640 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4641 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4642 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4643 + sllx == 0x83287020U &&
4644 + jmpl == 0x81C04005U &&
4645 + nop == 0x01000000U)
4646 + {
4647 + unsigned long addr;
4648 +
4649 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4650 + regs->u_regs[UREG_G1] <<= 32;
4651 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4652 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4653 + regs->tpc = addr;
4654 + regs->tnpc = addr+4;
4655 + return 2;
4656 + }
4657 + } while (0);
4658 +
4659 + do { /* PaX: patched PLT emulation #6 */
4660 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4661 +
4662 + err = get_user(sethi, (unsigned int *)regs->tpc);
4663 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4664 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4665 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4666 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4667 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4668 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4675 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4676 + sllx == 0x83287020U &&
4677 + (or & 0xFFFFE000U) == 0x8A116000U &&
4678 + jmpl == 0x81C04005U &&
4679 + nop == 0x01000000U)
4680 + {
4681 + unsigned long addr;
4682 +
4683 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4684 + regs->u_regs[UREG_G1] <<= 32;
4685 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4686 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4687 + regs->tpc = addr;
4688 + regs->tnpc = addr+4;
4689 + return 2;
4690 + }
4691 + } while (0);
4692 +
4693 + do { /* PaX: unpatched PLT emulation step 1 */
4694 + unsigned int sethi, ba, nop;
4695 +
4696 + err = get_user(sethi, (unsigned int *)regs->tpc);
4697 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4698 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4699 +
4700 + if (err)
4701 + break;
4702 +
4703 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4704 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4705 + nop == 0x01000000U)
4706 + {
4707 + unsigned long addr;
4708 + unsigned int save, call;
4709 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4710 +
4711 + if ((ba & 0xFFC00000U) == 0x30800000U)
4712 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4713 + else
4714 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4715 +
4716 + if (test_thread_flag(TIF_32BIT))
4717 + addr &= 0xFFFFFFFFUL;
4718 +
4719 + err = get_user(save, (unsigned int *)addr);
4720 + err |= get_user(call, (unsigned int *)(addr+4));
4721 + err |= get_user(nop, (unsigned int *)(addr+8));
4722 + if (err)
4723 + break;
4724 +
4725 +#ifdef CONFIG_PAX_DLRESOLVE
4726 + if (save == 0x9DE3BFA8U &&
4727 + (call & 0xC0000000U) == 0x40000000U &&
4728 + nop == 0x01000000U)
4729 + {
4730 + struct vm_area_struct *vma;
4731 + unsigned long call_dl_resolve;
4732 +
4733 + down_read(&current->mm->mmap_sem);
4734 + call_dl_resolve = current->mm->call_dl_resolve;
4735 + up_read(&current->mm->mmap_sem);
4736 + if (likely(call_dl_resolve))
4737 + goto emulate;
4738 +
4739 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4740 +
4741 + down_write(&current->mm->mmap_sem);
4742 + if (current->mm->call_dl_resolve) {
4743 + call_dl_resolve = current->mm->call_dl_resolve;
4744 + up_write(&current->mm->mmap_sem);
4745 + if (vma)
4746 + kmem_cache_free(vm_area_cachep, vma);
4747 + goto emulate;
4748 + }
4749 +
4750 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4751 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4752 + up_write(&current->mm->mmap_sem);
4753 + if (vma)
4754 + kmem_cache_free(vm_area_cachep, vma);
4755 + return 1;
4756 + }
4757 +
4758 + if (pax_insert_vma(vma, call_dl_resolve)) {
4759 + up_write(&current->mm->mmap_sem);
4760 + kmem_cache_free(vm_area_cachep, vma);
4761 + return 1;
4762 + }
4763 +
4764 + current->mm->call_dl_resolve = call_dl_resolve;
4765 + up_write(&current->mm->mmap_sem);
4766 +
4767 +emulate:
4768 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4769 + regs->tpc = call_dl_resolve;
4770 + regs->tnpc = addr+4;
4771 + return 3;
4772 + }
4773 +#endif
4774 +
4775 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4776 + if ((save & 0xFFC00000U) == 0x05000000U &&
4777 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4778 + nop == 0x01000000U)
4779 + {
4780 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4781 + regs->u_regs[UREG_G2] = addr + 4;
4782 + addr = (save & 0x003FFFFFU) << 10;
4783 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4784 +
4785 + if (test_thread_flag(TIF_32BIT))
4786 + addr &= 0xFFFFFFFFUL;
4787 +
4788 + regs->tpc = addr;
4789 + regs->tnpc = addr+4;
4790 + return 3;
4791 + }
4792 +
4793 + /* PaX: 64-bit PLT stub */
4794 + err = get_user(sethi1, (unsigned int *)addr);
4795 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4796 + err |= get_user(or1, (unsigned int *)(addr+8));
4797 + err |= get_user(or2, (unsigned int *)(addr+12));
4798 + err |= get_user(sllx, (unsigned int *)(addr+16));
4799 + err |= get_user(add, (unsigned int *)(addr+20));
4800 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4801 + err |= get_user(nop, (unsigned int *)(addr+28));
4802 + if (err)
4803 + break;
4804 +
4805 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4806 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4807 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4808 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4809 + sllx == 0x89293020U &&
4810 + add == 0x8A010005U &&
4811 + jmpl == 0x89C14000U &&
4812 + nop == 0x01000000U)
4813 + {
4814 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4815 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4816 + regs->u_regs[UREG_G4] <<= 32;
4817 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4818 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4819 + regs->u_regs[UREG_G4] = addr + 24;
4820 + addr = regs->u_regs[UREG_G5];
4821 + regs->tpc = addr;
4822 + regs->tnpc = addr+4;
4823 + return 3;
4824 + }
4825 + }
4826 + } while (0);
4827 +
4828 +#ifdef CONFIG_PAX_DLRESOLVE
4829 + do { /* PaX: unpatched PLT emulation step 2 */
4830 + unsigned int save, call, nop;
4831 +
4832 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4833 + err |= get_user(call, (unsigned int *)regs->tpc);
4834 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4835 + if (err)
4836 + break;
4837 +
4838 + if (save == 0x9DE3BFA8U &&
4839 + (call & 0xC0000000U) == 0x40000000U &&
4840 + nop == 0x01000000U)
4841 + {
4842 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4843 +
4844 + if (test_thread_flag(TIF_32BIT))
4845 + dl_resolve &= 0xFFFFFFFFUL;
4846 +
4847 + regs->u_regs[UREG_RETPC] = regs->tpc;
4848 + regs->tpc = dl_resolve;
4849 + regs->tnpc = dl_resolve+4;
4850 + return 3;
4851 + }
4852 + } while (0);
4853 +#endif
4854 +
4855 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4856 + unsigned int sethi, ba, nop;
4857 +
4858 + err = get_user(sethi, (unsigned int *)regs->tpc);
4859 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4860 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4861 +
4862 + if (err)
4863 + break;
4864 +
4865 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4866 + (ba & 0xFFF00000U) == 0x30600000U &&
4867 + nop == 0x01000000U)
4868 + {
4869 + unsigned long addr;
4870 +
4871 + addr = (sethi & 0x003FFFFFU) << 10;
4872 + regs->u_regs[UREG_G1] = addr;
4873 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4874 +
4875 + if (test_thread_flag(TIF_32BIT))
4876 + addr &= 0xFFFFFFFFUL;
4877 +
4878 + regs->tpc = addr;
4879 + regs->tnpc = addr+4;
4880 + return 2;
4881 + }
4882 + } while (0);
4883 +
4884 +#endif
4885 +
4886 + return 1;
4887 +}
4888 +
4889 +void pax_report_insns(void *pc, void *sp)
4890 +{
4891 + unsigned long i;
4892 +
4893 + printk(KERN_ERR "PAX: bytes at PC: ");
4894 + for (i = 0; i < 8; i++) {
4895 + unsigned int c;
4896 + if (get_user(c, (unsigned int *)pc+i))
4897 + printk(KERN_CONT "???????? ");
4898 + else
4899 + printk(KERN_CONT "%08x ", c);
4900 + }
4901 + printk("\n");
4902 +}
4903 +#endif
4904 +
4905 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4906 {
4907 struct mm_struct *mm = current->mm;
4908 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4909 if (!vma)
4910 goto bad_area;
4911
4912 +#ifdef CONFIG_PAX_PAGEEXEC
4913 + /* PaX: detect ITLB misses on non-exec pages */
4914 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4915 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4916 + {
4917 + if (address != regs->tpc)
4918 + goto good_area;
4919 +
4920 + up_read(&mm->mmap_sem);
4921 + switch (pax_handle_fetch_fault(regs)) {
4922 +
4923 +#ifdef CONFIG_PAX_EMUPLT
4924 + case 2:
4925 + case 3:
4926 + return;
4927 +#endif
4928 +
4929 + }
4930 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4931 + do_group_exit(SIGKILL);
4932 + }
4933 +#endif
4934 +
4935 /* Pure DTLB misses do not tell us whether the fault causing
4936 * load/store/atomic was a write or not, it only says that there
4937 * was no match. So in such a case we (carefully) read the
4938 diff -urNp linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c
4939 --- linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
4940 +++ linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
4941 @@ -68,7 +68,7 @@ full_search:
4942 }
4943 return -ENOMEM;
4944 }
4945 - if (likely(!vma || addr + len <= vma->vm_start)) {
4946 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4947 /*
4948 * Remember the place where we stopped the search:
4949 */
4950 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4951 /* make sure it can fit in the remaining address space */
4952 if (likely(addr > len)) {
4953 vma = find_vma(mm, addr-len);
4954 - if (!vma || addr <= vma->vm_start) {
4955 + if (check_heap_stack_gap(vma, addr - len, len)) {
4956 /* remember the address as a hint for next time */
4957 return (mm->free_area_cache = addr-len);
4958 }
4959 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4960 if (unlikely(mm->mmap_base < len))
4961 goto bottomup;
4962
4963 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4964 + addr = mm->mmap_base - len;
4965
4966 do {
4967 + addr &= HPAGE_MASK;
4968 /*
4969 * Lookup failure means no vma is above this address,
4970 * else if new region fits below vma->vm_start,
4971 * return with success:
4972 */
4973 vma = find_vma(mm, addr);
4974 - if (likely(!vma || addr+len <= vma->vm_start)) {
4975 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4976 /* remember the address as a hint for next time */
4977 return (mm->free_area_cache = addr);
4978 }
4979 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4980 mm->cached_hole_size = vma->vm_start - addr;
4981
4982 /* try just below the current vma->vm_start */
4983 - addr = (vma->vm_start-len) & HPAGE_MASK;
4984 - } while (likely(len < vma->vm_start));
4985 + addr = skip_heap_stack_gap(vma, len);
4986 + } while (!IS_ERR_VALUE(addr));
4987
4988 bottomup:
4989 /*
4990 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
4991 if (addr) {
4992 addr = ALIGN(addr, HPAGE_SIZE);
4993 vma = find_vma(mm, addr);
4994 - if (task_size - len >= addr &&
4995 - (!vma || addr + len <= vma->vm_start))
4996 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4997 return addr;
4998 }
4999 if (mm->get_unmapped_area == arch_get_unmapped_area)
5000 diff -urNp linux-2.6.39.4/arch/sparc/mm/init_32.c linux-2.6.39.4/arch/sparc/mm/init_32.c
5001 --- linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
5002 +++ linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-08-05 19:44:33.000000000 -0400
5003 @@ -318,6 +318,9 @@ extern void device_scan(void);
5004 pgprot_t PAGE_SHARED __read_mostly;
5005 EXPORT_SYMBOL(PAGE_SHARED);
5006
5007 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5008 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5009 +
5010 void __init paging_init(void)
5011 {
5012 switch(sparc_cpu_model) {
5013 @@ -346,17 +349,17 @@ void __init paging_init(void)
5014
5015 /* Initialize the protection map with non-constant, MMU dependent values. */
5016 protection_map[0] = PAGE_NONE;
5017 - protection_map[1] = PAGE_READONLY;
5018 - protection_map[2] = PAGE_COPY;
5019 - protection_map[3] = PAGE_COPY;
5020 + protection_map[1] = PAGE_READONLY_NOEXEC;
5021 + protection_map[2] = PAGE_COPY_NOEXEC;
5022 + protection_map[3] = PAGE_COPY_NOEXEC;
5023 protection_map[4] = PAGE_READONLY;
5024 protection_map[5] = PAGE_READONLY;
5025 protection_map[6] = PAGE_COPY;
5026 protection_map[7] = PAGE_COPY;
5027 protection_map[8] = PAGE_NONE;
5028 - protection_map[9] = PAGE_READONLY;
5029 - protection_map[10] = PAGE_SHARED;
5030 - protection_map[11] = PAGE_SHARED;
5031 + protection_map[9] = PAGE_READONLY_NOEXEC;
5032 + protection_map[10] = PAGE_SHARED_NOEXEC;
5033 + protection_map[11] = PAGE_SHARED_NOEXEC;
5034 protection_map[12] = PAGE_READONLY;
5035 protection_map[13] = PAGE_READONLY;
5036 protection_map[14] = PAGE_SHARED;
5037 diff -urNp linux-2.6.39.4/arch/sparc/mm/Makefile linux-2.6.39.4/arch/sparc/mm/Makefile
5038 --- linux-2.6.39.4/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400
5039 +++ linux-2.6.39.4/arch/sparc/mm/Makefile 2011-08-05 19:44:33.000000000 -0400
5040 @@ -2,7 +2,7 @@
5041 #
5042
5043 asflags-y := -ansi
5044 -ccflags-y := -Werror
5045 +#ccflags-y := -Werror
5046
5047 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5048 obj-y += fault_$(BITS).o
5049 diff -urNp linux-2.6.39.4/arch/sparc/mm/srmmu.c linux-2.6.39.4/arch/sparc/mm/srmmu.c
5050 --- linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400
5051 +++ linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-08-05 19:44:33.000000000 -0400
5052 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5053 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5054 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5055 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5056 +
5057 +#ifdef CONFIG_PAX_PAGEEXEC
5058 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5059 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5060 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5061 +#endif
5062 +
5063 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5064 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5065
5066 diff -urNp linux-2.6.39.4/arch/um/include/asm/kmap_types.h linux-2.6.39.4/arch/um/include/asm/kmap_types.h
5067 --- linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
5068 +++ linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
5069 @@ -23,6 +23,7 @@ enum km_type {
5070 KM_IRQ1,
5071 KM_SOFTIRQ0,
5072 KM_SOFTIRQ1,
5073 + KM_CLEARPAGE,
5074 KM_TYPE_NR
5075 };
5076
5077 diff -urNp linux-2.6.39.4/arch/um/include/asm/page.h linux-2.6.39.4/arch/um/include/asm/page.h
5078 --- linux-2.6.39.4/arch/um/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
5079 +++ linux-2.6.39.4/arch/um/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
5080 @@ -14,6 +14,9 @@
5081 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5082 #define PAGE_MASK (~(PAGE_SIZE-1))
5083
5084 +#define ktla_ktva(addr) (addr)
5085 +#define ktva_ktla(addr) (addr)
5086 +
5087 #ifndef __ASSEMBLY__
5088
5089 struct page;
5090 diff -urNp linux-2.6.39.4/arch/um/kernel/process.c linux-2.6.39.4/arch/um/kernel/process.c
5091 --- linux-2.6.39.4/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
5092 +++ linux-2.6.39.4/arch/um/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
5093 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5094 return 2;
5095 }
5096
5097 -/*
5098 - * Only x86 and x86_64 have an arch_align_stack().
5099 - * All other arches have "#define arch_align_stack(x) (x)"
5100 - * in their asm/system.h
5101 - * As this is included in UML from asm-um/system-generic.h,
5102 - * we can use it to behave as the subarch does.
5103 - */
5104 -#ifndef arch_align_stack
5105 -unsigned long arch_align_stack(unsigned long sp)
5106 -{
5107 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5108 - sp -= get_random_int() % 8192;
5109 - return sp & ~0xf;
5110 -}
5111 -#endif
5112 -
5113 unsigned long get_wchan(struct task_struct *p)
5114 {
5115 unsigned long stack_page, sp, ip;
5116 diff -urNp linux-2.6.39.4/arch/um/sys-i386/syscalls.c linux-2.6.39.4/arch/um/sys-i386/syscalls.c
5117 --- linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400
5118 +++ linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-08-05 19:44:33.000000000 -0400
5119 @@ -11,6 +11,21 @@
5120 #include "asm/uaccess.h"
5121 #include "asm/unistd.h"
5122
5123 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5124 +{
5125 + unsigned long pax_task_size = TASK_SIZE;
5126 +
5127 +#ifdef CONFIG_PAX_SEGMEXEC
5128 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5129 + pax_task_size = SEGMEXEC_TASK_SIZE;
5130 +#endif
5131 +
5132 + if (len > pax_task_size || addr > pax_task_size - len)
5133 + return -EINVAL;
5134 +
5135 + return 0;
5136 +}
5137 +
5138 /*
5139 * The prototype on i386 is:
5140 *
5141 diff -urNp linux-2.6.39.4/arch/x86/boot/bitops.h linux-2.6.39.4/arch/x86/boot/bitops.h
5142 --- linux-2.6.39.4/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400
5143 +++ linux-2.6.39.4/arch/x86/boot/bitops.h 2011-08-05 19:44:33.000000000 -0400
5144 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5145 u8 v;
5146 const u32 *p = (const u32 *)addr;
5147
5148 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5149 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5150 return v;
5151 }
5152
5153 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5154
5155 static inline void set_bit(int nr, void *addr)
5156 {
5157 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5158 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5159 }
5160
5161 #endif /* BOOT_BITOPS_H */
5162 diff -urNp linux-2.6.39.4/arch/x86/boot/boot.h linux-2.6.39.4/arch/x86/boot/boot.h
5163 --- linux-2.6.39.4/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400
5164 +++ linux-2.6.39.4/arch/x86/boot/boot.h 2011-08-05 19:44:33.000000000 -0400
5165 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5166 static inline u16 ds(void)
5167 {
5168 u16 seg;
5169 - asm("movw %%ds,%0" : "=rm" (seg));
5170 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5171 return seg;
5172 }
5173
5174 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5175 static inline int memcmp(const void *s1, const void *s2, size_t len)
5176 {
5177 u8 diff;
5178 - asm("repe; cmpsb; setnz %0"
5179 + asm volatile("repe; cmpsb; setnz %0"
5180 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5181 return diff;
5182 }
5183 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_32.S linux-2.6.39.4/arch/x86/boot/compressed/head_32.S
5184 --- linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400
5185 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-08-05 19:44:33.000000000 -0400
5186 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5187 notl %eax
5188 andl %eax, %ebx
5189 #else
5190 - movl $LOAD_PHYSICAL_ADDR, %ebx
5191 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5192 #endif
5193
5194 /* Target address to relocate to for decompression */
5195 @@ -162,7 +162,7 @@ relocated:
5196 * and where it was actually loaded.
5197 */
5198 movl %ebp, %ebx
5199 - subl $LOAD_PHYSICAL_ADDR, %ebx
5200 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5201 jz 2f /* Nothing to be done if loaded at compiled addr. */
5202 /*
5203 * Process relocations.
5204 @@ -170,8 +170,7 @@ relocated:
5205
5206 1: subl $4, %edi
5207 movl (%edi), %ecx
5208 - testl %ecx, %ecx
5209 - jz 2f
5210 + jecxz 2f
5211 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5212 jmp 1b
5213 2:
5214 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_64.S linux-2.6.39.4/arch/x86/boot/compressed/head_64.S
5215 --- linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400
5216 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-08-05 19:44:33.000000000 -0400
5217 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5218 notl %eax
5219 andl %eax, %ebx
5220 #else
5221 - movl $LOAD_PHYSICAL_ADDR, %ebx
5222 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5223 #endif
5224
5225 /* Target address to relocate to for decompression */
5226 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5227 notq %rax
5228 andq %rax, %rbp
5229 #else
5230 - movq $LOAD_PHYSICAL_ADDR, %rbp
5231 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5232 #endif
5233
5234 /* Target address to relocate to for decompression */
5235 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/Makefile linux-2.6.39.4/arch/x86/boot/compressed/Makefile
5236 --- linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-05-19 00:06:34.000000000 -0400
5237 +++ linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-08-05 20:34:06.000000000 -0400
5238 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5239 KBUILD_CFLAGS += $(cflags-y)
5240 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5241 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5242 +ifdef CONSTIFY_PLUGIN
5243 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5244 +endif
5245
5246 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5247 GCOV_PROFILE := n
5248 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/misc.c linux-2.6.39.4/arch/x86/boot/compressed/misc.c
5249 --- linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400
5250 +++ linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-08-05 19:44:33.000000000 -0400
5251 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5252 case PT_LOAD:
5253 #ifdef CONFIG_RELOCATABLE
5254 dest = output;
5255 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5256 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5257 #else
5258 dest = (void *)(phdr->p_paddr);
5259 #endif
5260 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5261 error("Destination address too large");
5262 #endif
5263 #ifndef CONFIG_RELOCATABLE
5264 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5265 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5266 error("Wrong destination address");
5267 #endif
5268
5269 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/relocs.c linux-2.6.39.4/arch/x86/boot/compressed/relocs.c
5270 --- linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400
5271 +++ linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-08-05 19:44:33.000000000 -0400
5272 @@ -13,8 +13,11 @@
5273
5274 static void die(char *fmt, ...);
5275
5276 +#include "../../../../include/generated/autoconf.h"
5277 +
5278 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5279 static Elf32_Ehdr ehdr;
5280 +static Elf32_Phdr *phdr;
5281 static unsigned long reloc_count, reloc_idx;
5282 static unsigned long *relocs;
5283
5284 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5285 }
5286 }
5287
5288 +static void read_phdrs(FILE *fp)
5289 +{
5290 + unsigned int i;
5291 +
5292 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5293 + if (!phdr) {
5294 + die("Unable to allocate %d program headers\n",
5295 + ehdr.e_phnum);
5296 + }
5297 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5298 + die("Seek to %d failed: %s\n",
5299 + ehdr.e_phoff, strerror(errno));
5300 + }
5301 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5302 + die("Cannot read ELF program headers: %s\n",
5303 + strerror(errno));
5304 + }
5305 + for(i = 0; i < ehdr.e_phnum; i++) {
5306 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5307 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5308 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5309 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5310 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5311 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5312 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5313 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5314 + }
5315 +
5316 +}
5317 +
5318 static void read_shdrs(FILE *fp)
5319 {
5320 - int i;
5321 + unsigned int i;
5322 Elf32_Shdr shdr;
5323
5324 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5325 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5326
5327 static void read_strtabs(FILE *fp)
5328 {
5329 - int i;
5330 + unsigned int i;
5331 for (i = 0; i < ehdr.e_shnum; i++) {
5332 struct section *sec = &secs[i];
5333 if (sec->shdr.sh_type != SHT_STRTAB) {
5334 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5335
5336 static void read_symtabs(FILE *fp)
5337 {
5338 - int i,j;
5339 + unsigned int i,j;
5340 for (i = 0; i < ehdr.e_shnum; i++) {
5341 struct section *sec = &secs[i];
5342 if (sec->shdr.sh_type != SHT_SYMTAB) {
5343 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5344
5345 static void read_relocs(FILE *fp)
5346 {
5347 - int i,j;
5348 + unsigned int i,j;
5349 + uint32_t base;
5350 +
5351 for (i = 0; i < ehdr.e_shnum; i++) {
5352 struct section *sec = &secs[i];
5353 if (sec->shdr.sh_type != SHT_REL) {
5354 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5355 die("Cannot read symbol table: %s\n",
5356 strerror(errno));
5357 }
5358 + base = 0;
5359 + for (j = 0; j < ehdr.e_phnum; j++) {
5360 + if (phdr[j].p_type != PT_LOAD )
5361 + continue;
5362 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5363 + continue;
5364 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5365 + break;
5366 + }
5367 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5368 Elf32_Rel *rel = &sec->reltab[j];
5369 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5370 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5371 rel->r_info = elf32_to_cpu(rel->r_info);
5372 }
5373 }
5374 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5375
5376 static void print_absolute_symbols(void)
5377 {
5378 - int i;
5379 + unsigned int i;
5380 printf("Absolute symbols\n");
5381 printf(" Num: Value Size Type Bind Visibility Name\n");
5382 for (i = 0; i < ehdr.e_shnum; i++) {
5383 struct section *sec = &secs[i];
5384 char *sym_strtab;
5385 Elf32_Sym *sh_symtab;
5386 - int j;
5387 + unsigned int j;
5388
5389 if (sec->shdr.sh_type != SHT_SYMTAB) {
5390 continue;
5391 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5392
5393 static void print_absolute_relocs(void)
5394 {
5395 - int i, printed = 0;
5396 + unsigned int i, printed = 0;
5397
5398 for (i = 0; i < ehdr.e_shnum; i++) {
5399 struct section *sec = &secs[i];
5400 struct section *sec_applies, *sec_symtab;
5401 char *sym_strtab;
5402 Elf32_Sym *sh_symtab;
5403 - int j;
5404 + unsigned int j;
5405 if (sec->shdr.sh_type != SHT_REL) {
5406 continue;
5407 }
5408 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5409
5410 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5411 {
5412 - int i;
5413 + unsigned int i;
5414 /* Walk through the relocations */
5415 for (i = 0; i < ehdr.e_shnum; i++) {
5416 char *sym_strtab;
5417 Elf32_Sym *sh_symtab;
5418 struct section *sec_applies, *sec_symtab;
5419 - int j;
5420 + unsigned int j;
5421 struct section *sec = &secs[i];
5422
5423 if (sec->shdr.sh_type != SHT_REL) {
5424 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5425 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5426 continue;
5427 }
5428 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5429 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5430 + continue;
5431 +
5432 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5433 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5434 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5435 + continue;
5436 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5437 + continue;
5438 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5439 + continue;
5440 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5441 + continue;
5442 +#endif
5443 +
5444 switch (r_type) {
5445 case R_386_NONE:
5446 case R_386_PC32:
5447 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5448
5449 static void emit_relocs(int as_text)
5450 {
5451 - int i;
5452 + unsigned int i;
5453 /* Count how many relocations I have and allocate space for them. */
5454 reloc_count = 0;
5455 walk_relocs(count_reloc);
5456 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5457 fname, strerror(errno));
5458 }
5459 read_ehdr(fp);
5460 + read_phdrs(fp);
5461 read_shdrs(fp);
5462 read_strtabs(fp);
5463 read_symtabs(fp);
5464 diff -urNp linux-2.6.39.4/arch/x86/boot/cpucheck.c linux-2.6.39.4/arch/x86/boot/cpucheck.c
5465 --- linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400
5466 +++ linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-08-05 19:44:33.000000000 -0400
5467 @@ -74,7 +74,7 @@ static int has_fpu(void)
5468 u16 fcw = -1, fsw = -1;
5469 u32 cr0;
5470
5471 - asm("movl %%cr0,%0" : "=r" (cr0));
5472 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5473 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5474 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5475 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5476 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5477 {
5478 u32 f0, f1;
5479
5480 - asm("pushfl ; "
5481 + asm volatile("pushfl ; "
5482 "pushfl ; "
5483 "popl %0 ; "
5484 "movl %0,%1 ; "
5485 @@ -115,7 +115,7 @@ static void get_flags(void)
5486 set_bit(X86_FEATURE_FPU, cpu.flags);
5487
5488 if (has_eflag(X86_EFLAGS_ID)) {
5489 - asm("cpuid"
5490 + asm volatile("cpuid"
5491 : "=a" (max_intel_level),
5492 "=b" (cpu_vendor[0]),
5493 "=d" (cpu_vendor[1]),
5494 @@ -124,7 +124,7 @@ static void get_flags(void)
5495
5496 if (max_intel_level >= 0x00000001 &&
5497 max_intel_level <= 0x0000ffff) {
5498 - asm("cpuid"
5499 + asm volatile("cpuid"
5500 : "=a" (tfms),
5501 "=c" (cpu.flags[4]),
5502 "=d" (cpu.flags[0])
5503 @@ -136,7 +136,7 @@ static void get_flags(void)
5504 cpu.model += ((tfms >> 16) & 0xf) << 4;
5505 }
5506
5507 - asm("cpuid"
5508 + asm volatile("cpuid"
5509 : "=a" (max_amd_level)
5510 : "a" (0x80000000)
5511 : "ebx", "ecx", "edx");
5512 @@ -144,7 +144,7 @@ static void get_flags(void)
5513 if (max_amd_level >= 0x80000001 &&
5514 max_amd_level <= 0x8000ffff) {
5515 u32 eax = 0x80000001;
5516 - asm("cpuid"
5517 + asm volatile("cpuid"
5518 : "+a" (eax),
5519 "=c" (cpu.flags[6]),
5520 "=d" (cpu.flags[1])
5521 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5522 u32 ecx = MSR_K7_HWCR;
5523 u32 eax, edx;
5524
5525 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5526 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5527 eax &= ~(1 << 15);
5528 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5529 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5530
5531 get_flags(); /* Make sure it really did something */
5532 err = check_flags();
5533 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5534 u32 ecx = MSR_VIA_FCR;
5535 u32 eax, edx;
5536
5537 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5538 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5539 eax |= (1<<1)|(1<<7);
5540 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5541 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5542
5543 set_bit(X86_FEATURE_CX8, cpu.flags);
5544 err = check_flags();
5545 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5546 u32 eax, edx;
5547 u32 level = 1;
5548
5549 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5550 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5551 - asm("cpuid"
5552 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5553 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5554 + asm volatile("cpuid"
5555 : "+a" (level), "=d" (cpu.flags[0])
5556 : : "ecx", "ebx");
5557 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5558 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5559
5560 err = check_flags();
5561 }
5562 diff -urNp linux-2.6.39.4/arch/x86/boot/header.S linux-2.6.39.4/arch/x86/boot/header.S
5563 --- linux-2.6.39.4/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400
5564 +++ linux-2.6.39.4/arch/x86/boot/header.S 2011-08-05 19:44:33.000000000 -0400
5565 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5566 # single linked list of
5567 # struct setup_data
5568
5569 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5570 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5571
5572 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5573 #define VO_INIT_SIZE (VO__end - VO__text)
5574 diff -urNp linux-2.6.39.4/arch/x86/boot/Makefile linux-2.6.39.4/arch/x86/boot/Makefile
5575 --- linux-2.6.39.4/arch/x86/boot/Makefile 2011-05-19 00:06:34.000000000 -0400
5576 +++ linux-2.6.39.4/arch/x86/boot/Makefile 2011-08-05 20:34:06.000000000 -0400
5577 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5578 $(call cc-option, -fno-stack-protector) \
5579 $(call cc-option, -mpreferred-stack-boundary=2)
5580 KBUILD_CFLAGS += $(call cc-option, -m32)
5581 +ifdef CONSTIFY_PLUGIN
5582 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5583 +endif
5584 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5585 GCOV_PROFILE := n
5586
5587 diff -urNp linux-2.6.39.4/arch/x86/boot/memory.c linux-2.6.39.4/arch/x86/boot/memory.c
5588 --- linux-2.6.39.4/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400
5589 +++ linux-2.6.39.4/arch/x86/boot/memory.c 2011-08-05 19:44:33.000000000 -0400
5590 @@ -19,7 +19,7 @@
5591
5592 static int detect_memory_e820(void)
5593 {
5594 - int count = 0;
5595 + unsigned int count = 0;
5596 struct biosregs ireg, oreg;
5597 struct e820entry *desc = boot_params.e820_map;
5598 static struct e820entry buf; /* static so it is zeroed */
5599 diff -urNp linux-2.6.39.4/arch/x86/boot/video.c linux-2.6.39.4/arch/x86/boot/video.c
5600 --- linux-2.6.39.4/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400
5601 +++ linux-2.6.39.4/arch/x86/boot/video.c 2011-08-05 19:44:33.000000000 -0400
5602 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5603 static unsigned int get_entry(void)
5604 {
5605 char entry_buf[4];
5606 - int i, len = 0;
5607 + unsigned int i, len = 0;
5608 int key;
5609 unsigned int v;
5610
5611 diff -urNp linux-2.6.39.4/arch/x86/boot/video-vesa.c linux-2.6.39.4/arch/x86/boot/video-vesa.c
5612 --- linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400
5613 +++ linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-08-05 19:44:33.000000000 -0400
5614 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5615
5616 boot_params.screen_info.vesapm_seg = oreg.es;
5617 boot_params.screen_info.vesapm_off = oreg.di;
5618 + boot_params.screen_info.vesapm_size = oreg.cx;
5619 }
5620
5621 /*
5622 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_aout.c linux-2.6.39.4/arch/x86/ia32/ia32_aout.c
5623 --- linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400
5624 +++ linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-08-05 19:44:33.000000000 -0400
5625 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5626 unsigned long dump_start, dump_size;
5627 struct user32 dump;
5628
5629 + memset(&dump, 0, sizeof(dump));
5630 +
5631 fs = get_fs();
5632 set_fs(KERNEL_DS);
5633 has_dumped = 1;
5634 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32entry.S linux-2.6.39.4/arch/x86/ia32/ia32entry.S
5635 --- linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400
5636 +++ linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-08-05 19:44:33.000000000 -0400
5637 @@ -13,6 +13,7 @@
5638 #include <asm/thread_info.h>
5639 #include <asm/segment.h>
5640 #include <asm/irqflags.h>
5641 +#include <asm/pgtable.h>
5642 #include <linux/linkage.h>
5643
5644 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5645 @@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5646 ENDPROC(native_irq_enable_sysexit)
5647 #endif
5648
5649 + .macro pax_enter_kernel_user
5650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5651 + call pax_enter_kernel_user
5652 +#endif
5653 + .endm
5654 +
5655 + .macro pax_exit_kernel_user
5656 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5657 + call pax_exit_kernel_user
5658 +#endif
5659 +#ifdef CONFIG_PAX_RANDKSTACK
5660 + pushq %rax
5661 + call pax_randomize_kstack
5662 + popq %rax
5663 +#endif
5664 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5665 + call pax_erase_kstack
5666 +#endif
5667 + .endm
5668 +
5669 + .macro pax_erase_kstack
5670 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5671 + call pax_erase_kstack
5672 +#endif
5673 + .endm
5674 +
5675 /*
5676 * 32bit SYSENTER instruction entry.
5677 *
5678 @@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5679 CFI_REGISTER rsp,rbp
5680 SWAPGS_UNSAFE_STACK
5681 movq PER_CPU_VAR(kernel_stack), %rsp
5682 - addq $(KERNEL_STACK_OFFSET),%rsp
5683 + pax_enter_kernel_user
5684 /*
5685 * No need to follow this irqs on/off section: the syscall
5686 * disabled irqs, here we enable it straight after entry:
5687 @@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5688 CFI_REL_OFFSET rsp,0
5689 pushfq_cfi
5690 /*CFI_REL_OFFSET rflags,0*/
5691 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5692 + GET_THREAD_INFO(%r10)
5693 + movl TI_sysenter_return(%r10), %r10d
5694 CFI_REGISTER rip,r10
5695 pushq_cfi $__USER32_CS
5696 /*CFI_REL_OFFSET cs,0*/
5697 @@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5698 SAVE_ARGS 0,0,1
5699 /* no need to do an access_ok check here because rbp has been
5700 32bit zero extended */
5701 +
5702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5703 + mov $PAX_USER_SHADOW_BASE,%r10
5704 + add %r10,%rbp
5705 +#endif
5706 +
5707 1: movl (%rbp),%ebp
5708 .section __ex_table,"a"
5709 .quad 1b,ia32_badarg
5710 @@ -168,6 +202,7 @@ sysenter_dispatch:
5711 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5712 jnz sysexit_audit
5713 sysexit_from_sys_call:
5714 + pax_exit_kernel_user
5715 andl $~TS_COMPAT,TI_status(%r10)
5716 /* clear IF, that popfq doesn't enable interrupts early */
5717 andl $~0x200,EFLAGS-R11(%rsp)
5718 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
5719 movl %eax,%esi /* 2nd arg: syscall number */
5720 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5721 call audit_syscall_entry
5722 +
5723 + pax_erase_kstack
5724 +
5725 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5726 cmpq $(IA32_NR_syscalls-1),%rax
5727 ja ia32_badsys
5728 @@ -246,6 +284,9 @@ sysenter_tracesys:
5729 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5730 movq %rsp,%rdi /* &pt_regs -> arg1 */
5731 call syscall_trace_enter
5732 +
5733 + pax_erase_kstack
5734 +
5735 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5736 RESTORE_REST
5737 cmpq $(IA32_NR_syscalls-1),%rax
5738 @@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5739 ENTRY(ia32_cstar_target)
5740 CFI_STARTPROC32 simple
5741 CFI_SIGNAL_FRAME
5742 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5743 + CFI_DEF_CFA rsp,0
5744 CFI_REGISTER rip,rcx
5745 /*CFI_REGISTER rflags,r11*/
5746 SWAPGS_UNSAFE_STACK
5747 movl %esp,%r8d
5748 CFI_REGISTER rsp,r8
5749 movq PER_CPU_VAR(kernel_stack),%rsp
5750 +
5751 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5752 + pax_enter_kernel_user
5753 +#endif
5754 +
5755 /*
5756 * No need to follow this irqs on/off section: the syscall
5757 * disabled irqs and here we enable it straight after entry:
5758 */
5759 ENABLE_INTERRUPTS(CLBR_NONE)
5760 - SAVE_ARGS 8,1,1
5761 + SAVE_ARGS 8*6,1,1
5762 movl %eax,%eax /* zero extension */
5763 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5764 movq %rcx,RIP-ARGOFFSET(%rsp)
5765 @@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5766 /* no need to do an access_ok check here because r8 has been
5767 32bit zero extended */
5768 /* hardware stack frame is complete now */
5769 +
5770 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5771 + mov $PAX_USER_SHADOW_BASE,%r10
5772 + add %r10,%r8
5773 +#endif
5774 +
5775 1: movl (%r8),%r9d
5776 .section __ex_table,"a"
5777 .quad 1b,ia32_badarg
5778 @@ -327,6 +379,7 @@ cstar_dispatch:
5779 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5780 jnz sysretl_audit
5781 sysretl_from_sys_call:
5782 + pax_exit_kernel_user
5783 andl $~TS_COMPAT,TI_status(%r10)
5784 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5785 movl RIP-ARGOFFSET(%rsp),%ecx
5786 @@ -364,6 +417,9 @@ cstar_tracesys:
5787 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5788 movq %rsp,%rdi /* &pt_regs -> arg1 */
5789 call syscall_trace_enter
5790 +
5791 + pax_erase_kstack
5792 +
5793 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5794 RESTORE_REST
5795 xchgl %ebp,%r9d
5796 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5797 CFI_REL_OFFSET rip,RIP-RIP
5798 PARAVIRT_ADJUST_EXCEPTION_FRAME
5799 SWAPGS
5800 + pax_enter_kernel_user
5801 /*
5802 * No need to follow this irqs on/off section: the syscall
5803 * disabled irqs and here we enable it straight after entry:
5804 @@ -441,6 +498,9 @@ ia32_tracesys:
5805 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5806 movq %rsp,%rdi /* &pt_regs -> arg1 */
5807 call syscall_trace_enter
5808 +
5809 + pax_erase_kstack
5810 +
5811 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5812 RESTORE_REST
5813 cmpq $(IA32_NR_syscalls-1),%rax
5814 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_signal.c linux-2.6.39.4/arch/x86/ia32/ia32_signal.c
5815 --- linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400
5816 +++ linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-08-05 19:44:33.000000000 -0400
5817 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5818 sp -= frame_size;
5819 /* Align the stack pointer according to the i386 ABI,
5820 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5821 - sp = ((sp + 4) & -16ul) - 4;
5822 + sp = ((sp - 12) & -16ul) - 4;
5823 return (void __user *) sp;
5824 }
5825
5826 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5827 * These are actually not used anymore, but left because some
5828 * gdb versions depend on them as a marker.
5829 */
5830 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5831 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5832 } put_user_catch(err);
5833
5834 if (err)
5835 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5836 0xb8,
5837 __NR_ia32_rt_sigreturn,
5838 0x80cd,
5839 - 0,
5840 + 0
5841 };
5842
5843 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5844 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5845
5846 if (ka->sa.sa_flags & SA_RESTORER)
5847 restorer = ka->sa.sa_restorer;
5848 + else if (current->mm->context.vdso)
5849 + /* Return stub is in 32bit vsyscall page */
5850 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5851 else
5852 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5853 - rt_sigreturn);
5854 + restorer = &frame->retcode;
5855 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5856
5857 /*
5858 * Not actually used anymore, but left because some gdb
5859 * versions need it.
5860 */
5861 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5862 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5863 } put_user_catch(err);
5864
5865 if (err)
5866 diff -urNp linux-2.6.39.4/arch/x86/include/asm/alternative.h linux-2.6.39.4/arch/x86/include/asm/alternative.h
5867 --- linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400
5868 +++ linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-08-05 19:44:33.000000000 -0400
5869 @@ -94,7 +94,7 @@ static inline int alternatives_text_rese
5870 ".section .discard,\"aw\",@progbits\n" \
5871 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5872 ".previous\n" \
5873 - ".section .altinstr_replacement, \"ax\"\n" \
5874 + ".section .altinstr_replacement, \"a\"\n" \
5875 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5876 ".previous"
5877
5878 diff -urNp linux-2.6.39.4/arch/x86/include/asm/apm.h linux-2.6.39.4/arch/x86/include/asm/apm.h
5879 --- linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400
5880 +++ linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-08-05 19:44:33.000000000 -0400
5881 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5882 __asm__ __volatile__(APM_DO_ZERO_SEGS
5883 "pushl %%edi\n\t"
5884 "pushl %%ebp\n\t"
5885 - "lcall *%%cs:apm_bios_entry\n\t"
5886 + "lcall *%%ss:apm_bios_entry\n\t"
5887 "setc %%al\n\t"
5888 "popl %%ebp\n\t"
5889 "popl %%edi\n\t"
5890 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5891 __asm__ __volatile__(APM_DO_ZERO_SEGS
5892 "pushl %%edi\n\t"
5893 "pushl %%ebp\n\t"
5894 - "lcall *%%cs:apm_bios_entry\n\t"
5895 + "lcall *%%ss:apm_bios_entry\n\t"
5896 "setc %%bl\n\t"
5897 "popl %%ebp\n\t"
5898 "popl %%edi\n\t"
5899 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h
5900 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400
5901 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-08-05 19:44:33.000000000 -0400
5902 @@ -12,6 +12,14 @@ typedef struct {
5903 u64 __aligned(8) counter;
5904 } atomic64_t;
5905
5906 +#ifdef CONFIG_PAX_REFCOUNT
5907 +typedef struct {
5908 + u64 __aligned(8) counter;
5909 +} atomic64_unchecked_t;
5910 +#else
5911 +typedef atomic64_t atomic64_unchecked_t;
5912 +#endif
5913 +
5914 #define ATOMIC64_INIT(val) { (val) }
5915
5916 #ifdef CONFIG_X86_CMPXCHG64
5917 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5918 }
5919
5920 /**
5921 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5922 + * @p: pointer to type atomic64_unchecked_t
5923 + * @o: expected value
5924 + * @n: new value
5925 + *
5926 + * Atomically sets @v to @n if it was equal to @o and returns
5927 + * the old value.
5928 + */
5929 +
5930 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5931 +{
5932 + return cmpxchg64(&v->counter, o, n);
5933 +}
5934 +
5935 +/**
5936 * atomic64_xchg - xchg atomic64 variable
5937 * @v: pointer to type atomic64_t
5938 * @n: value to assign
5939 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5940 }
5941
5942 /**
5943 + * atomic64_set_unchecked - set atomic64 variable
5944 + * @v: pointer to type atomic64_unchecked_t
5945 + * @n: value to assign
5946 + *
5947 + * Atomically sets the value of @v to @n.
5948 + */
5949 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5950 +{
5951 + unsigned high = (unsigned)(i >> 32);
5952 + unsigned low = (unsigned)i;
5953 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5954 + : "+b" (low), "+c" (high)
5955 + : "S" (v)
5956 + : "eax", "edx", "memory"
5957 + );
5958 +}
5959 +
5960 +/**
5961 * atomic64_read - read atomic64 variable
5962 * @v: pointer to type atomic64_t
5963 *
5964 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5965 }
5966
5967 /**
5968 + * atomic64_read_unchecked - read atomic64 variable
5969 + * @v: pointer to type atomic64_unchecked_t
5970 + *
5971 + * Atomically reads the value of @v and returns it.
5972 + */
5973 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
5974 +{
5975 + long long r;
5976 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
5977 + : "=A" (r), "+c" (v)
5978 + : : "memory"
5979 + );
5980 + return r;
5981 + }
5982 +
5983 +/**
5984 * atomic64_add_return - add and return
5985 * @i: integer value to add
5986 * @v: pointer to type atomic64_t
5987 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
5988 return i;
5989 }
5990
5991 +/**
5992 + * atomic64_add_return_unchecked - add and return
5993 + * @i: integer value to add
5994 + * @v: pointer to type atomic64_unchecked_t
5995 + *
5996 + * Atomically adds @i to @v and returns @i + *@v
5997 + */
5998 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
5999 +{
6000 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6001 + : "+A" (i), "+c" (v)
6002 + : : "memory"
6003 + );
6004 + return i;
6005 +}
6006 +
6007 /*
6008 * Other variants with different arithmetic operators:
6009 */
6010 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6011 return a;
6012 }
6013
6014 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6015 +{
6016 + long long a;
6017 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6018 + : "=A" (a)
6019 + : "S" (v)
6020 + : "memory", "ecx"
6021 + );
6022 + return a;
6023 +}
6024 +
6025 static inline long long atomic64_dec_return(atomic64_t *v)
6026 {
6027 long long a;
6028 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6029 }
6030
6031 /**
6032 + * atomic64_add_unchecked - add integer to atomic64 variable
6033 + * @i: integer value to add
6034 + * @v: pointer to type atomic64_unchecked_t
6035 + *
6036 + * Atomically adds @i to @v.
6037 + */
6038 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6039 +{
6040 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6041 + : "+A" (i), "+c" (v)
6042 + : : "memory"
6043 + );
6044 + return i;
6045 +}
6046 +
6047 +/**
6048 * atomic64_sub - subtract the atomic64 variable
6049 * @i: integer value to subtract
6050 * @v: pointer to type atomic64_t
6051 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h
6052 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400
6053 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-08-05 19:44:33.000000000 -0400
6054 @@ -18,7 +18,19 @@
6055 */
6056 static inline long atomic64_read(const atomic64_t *v)
6057 {
6058 - return (*(volatile long *)&(v)->counter);
6059 + return (*(volatile const long *)&(v)->counter);
6060 +}
6061 +
6062 +/**
6063 + * atomic64_read_unchecked - read atomic64 variable
6064 + * @v: pointer of type atomic64_unchecked_t
6065 + *
6066 + * Atomically reads the value of @v.
6067 + * Doesn't imply a read memory barrier.
6068 + */
6069 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6070 +{
6071 + return (*(volatile const long *)&(v)->counter);
6072 }
6073
6074 /**
6075 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6076 }
6077
6078 /**
6079 + * atomic64_set_unchecked - set atomic64 variable
6080 + * @v: pointer to type atomic64_unchecked_t
6081 + * @i: required value
6082 + *
6083 + * Atomically sets the value of @v to @i.
6084 + */
6085 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6086 +{
6087 + v->counter = i;
6088 +}
6089 +
6090 +/**
6091 * atomic64_add - add integer to atomic64 variable
6092 * @i: integer value to add
6093 * @v: pointer to type atomic64_t
6094 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6095 */
6096 static inline void atomic64_add(long i, atomic64_t *v)
6097 {
6098 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6099 +
6100 +#ifdef CONFIG_PAX_REFCOUNT
6101 + "jno 0f\n"
6102 + LOCK_PREFIX "subq %1,%0\n"
6103 + "int $4\n0:\n"
6104 + _ASM_EXTABLE(0b, 0b)
6105 +#endif
6106 +
6107 + : "=m" (v->counter)
6108 + : "er" (i), "m" (v->counter));
6109 +}
6110 +
6111 +/**
6112 + * atomic64_add_unchecked - add integer to atomic64 variable
6113 + * @i: integer value to add
6114 + * @v: pointer to type atomic64_unchecked_t
6115 + *
6116 + * Atomically adds @i to @v.
6117 + */
6118 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6119 +{
6120 asm volatile(LOCK_PREFIX "addq %1,%0"
6121 : "=m" (v->counter)
6122 : "er" (i), "m" (v->counter));
6123 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6124 */
6125 static inline void atomic64_sub(long i, atomic64_t *v)
6126 {
6127 - asm volatile(LOCK_PREFIX "subq %1,%0"
6128 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6129 +
6130 +#ifdef CONFIG_PAX_REFCOUNT
6131 + "jno 0f\n"
6132 + LOCK_PREFIX "addq %1,%0\n"
6133 + "int $4\n0:\n"
6134 + _ASM_EXTABLE(0b, 0b)
6135 +#endif
6136 +
6137 + : "=m" (v->counter)
6138 + : "er" (i), "m" (v->counter));
6139 +}
6140 +
6141 +/**
6142 + * atomic64_sub_unchecked - subtract the atomic64 variable
6143 + * @i: integer value to subtract
6144 + * @v: pointer to type atomic64_unchecked_t
6145 + *
6146 + * Atomically subtracts @i from @v.
6147 + */
6148 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6149 +{
6150 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6151 : "=m" (v->counter)
6152 : "er" (i), "m" (v->counter));
6153 }
6154 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6155 {
6156 unsigned char c;
6157
6158 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6159 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6160 +
6161 +#ifdef CONFIG_PAX_REFCOUNT
6162 + "jno 0f\n"
6163 + LOCK_PREFIX "addq %2,%0\n"
6164 + "int $4\n0:\n"
6165 + _ASM_EXTABLE(0b, 0b)
6166 +#endif
6167 +
6168 + "sete %1\n"
6169 : "=m" (v->counter), "=qm" (c)
6170 : "er" (i), "m" (v->counter) : "memory");
6171 return c;
6172 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6173 */
6174 static inline void atomic64_inc(atomic64_t *v)
6175 {
6176 + asm volatile(LOCK_PREFIX "incq %0\n"
6177 +
6178 +#ifdef CONFIG_PAX_REFCOUNT
6179 + "jno 0f\n"
6180 + LOCK_PREFIX "decq %0\n"
6181 + "int $4\n0:\n"
6182 + _ASM_EXTABLE(0b, 0b)
6183 +#endif
6184 +
6185 + : "=m" (v->counter)
6186 + : "m" (v->counter));
6187 +}
6188 +
6189 +/**
6190 + * atomic64_inc_unchecked - increment atomic64 variable
6191 + * @v: pointer to type atomic64_unchecked_t
6192 + *
6193 + * Atomically increments @v by 1.
6194 + */
6195 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6196 +{
6197 asm volatile(LOCK_PREFIX "incq %0"
6198 : "=m" (v->counter)
6199 : "m" (v->counter));
6200 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6201 */
6202 static inline void atomic64_dec(atomic64_t *v)
6203 {
6204 - asm volatile(LOCK_PREFIX "decq %0"
6205 + asm volatile(LOCK_PREFIX "decq %0\n"
6206 +
6207 +#ifdef CONFIG_PAX_REFCOUNT
6208 + "jno 0f\n"
6209 + LOCK_PREFIX "incq %0\n"
6210 + "int $4\n0:\n"
6211 + _ASM_EXTABLE(0b, 0b)
6212 +#endif
6213 +
6214 + : "=m" (v->counter)
6215 + : "m" (v->counter));
6216 +}
6217 +
6218 +/**
6219 + * atomic64_dec_unchecked - decrement atomic64 variable
6220 + * @v: pointer to type atomic64_t
6221 + *
6222 + * Atomically decrements @v by 1.
6223 + */
6224 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6225 +{
6226 + asm volatile(LOCK_PREFIX "decq %0\n"
6227 : "=m" (v->counter)
6228 : "m" (v->counter));
6229 }
6230 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6231 {
6232 unsigned char c;
6233
6234 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6235 + asm volatile(LOCK_PREFIX "decq %0\n"
6236 +
6237 +#ifdef CONFIG_PAX_REFCOUNT
6238 + "jno 0f\n"
6239 + LOCK_PREFIX "incq %0\n"
6240 + "int $4\n0:\n"
6241 + _ASM_EXTABLE(0b, 0b)
6242 +#endif
6243 +
6244 + "sete %1\n"
6245 : "=m" (v->counter), "=qm" (c)
6246 : "m" (v->counter) : "memory");
6247 return c != 0;
6248 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6249 {
6250 unsigned char c;
6251
6252 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6253 + asm volatile(LOCK_PREFIX "incq %0\n"
6254 +
6255 +#ifdef CONFIG_PAX_REFCOUNT
6256 + "jno 0f\n"
6257 + LOCK_PREFIX "decq %0\n"
6258 + "int $4\n0:\n"
6259 + _ASM_EXTABLE(0b, 0b)
6260 +#endif
6261 +
6262 + "sete %1\n"
6263 : "=m" (v->counter), "=qm" (c)
6264 : "m" (v->counter) : "memory");
6265 return c != 0;
6266 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6267 {
6268 unsigned char c;
6269
6270 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6271 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6272 +
6273 +#ifdef CONFIG_PAX_REFCOUNT
6274 + "jno 0f\n"
6275 + LOCK_PREFIX "subq %2,%0\n"
6276 + "int $4\n0:\n"
6277 + _ASM_EXTABLE(0b, 0b)
6278 +#endif
6279 +
6280 + "sets %1\n"
6281 : "=m" (v->counter), "=qm" (c)
6282 : "er" (i), "m" (v->counter) : "memory");
6283 return c;
6284 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6285 static inline long atomic64_add_return(long i, atomic64_t *v)
6286 {
6287 long __i = i;
6288 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6289 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6290 +
6291 +#ifdef CONFIG_PAX_REFCOUNT
6292 + "jno 0f\n"
6293 + "movq %0, %1\n"
6294 + "int $4\n0:\n"
6295 + _ASM_EXTABLE(0b, 0b)
6296 +#endif
6297 +
6298 + : "+r" (i), "+m" (v->counter)
6299 + : : "memory");
6300 + return i + __i;
6301 +}
6302 +
6303 +/**
6304 + * atomic64_add_return_unchecked - add and return
6305 + * @i: integer value to add
6306 + * @v: pointer to type atomic64_unchecked_t
6307 + *
6308 + * Atomically adds @i to @v and returns @i + @v
6309 + */
6310 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6311 +{
6312 + long __i = i;
6313 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6314 : "+r" (i), "+m" (v->counter)
6315 : : "memory");
6316 return i + __i;
6317 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6318 }
6319
6320 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6321 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6322 +{
6323 + return atomic64_add_return_unchecked(1, v);
6324 +}
6325 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6326
6327 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6328 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6329 return cmpxchg(&v->counter, old, new);
6330 }
6331
6332 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6333 +{
6334 + return cmpxchg(&v->counter, old, new);
6335 +}
6336 +
6337 static inline long atomic64_xchg(atomic64_t *v, long new)
6338 {
6339 return xchg(&v->counter, new);
6340 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6341 */
6342 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6343 {
6344 - long c, old;
6345 + long c, old, new;
6346 c = atomic64_read(v);
6347 for (;;) {
6348 - if (unlikely(c == (u)))
6349 + if (unlikely(c == u))
6350 break;
6351 - old = atomic64_cmpxchg((v), c, c + (a));
6352 +
6353 + asm volatile("add %2,%0\n"
6354 +
6355 +#ifdef CONFIG_PAX_REFCOUNT
6356 + "jno 0f\n"
6357 + "sub %2,%0\n"
6358 + "int $4\n0:\n"
6359 + _ASM_EXTABLE(0b, 0b)
6360 +#endif
6361 +
6362 + : "=r" (new)
6363 + : "0" (c), "ir" (a));
6364 +
6365 + old = atomic64_cmpxchg(v, c, new);
6366 if (likely(old == c))
6367 break;
6368 c = old;
6369 }
6370 - return c != (u);
6371 + return c != u;
6372 }
6373
6374 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6375 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic.h linux-2.6.39.4/arch/x86/include/asm/atomic.h
6376 --- linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400
6377 +++ linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-08-05 19:44:33.000000000 -0400
6378 @@ -22,7 +22,18 @@
6379 */
6380 static inline int atomic_read(const atomic_t *v)
6381 {
6382 - return (*(volatile int *)&(v)->counter);
6383 + return (*(volatile const int *)&(v)->counter);
6384 +}
6385 +
6386 +/**
6387 + * atomic_read_unchecked - read atomic variable
6388 + * @v: pointer of type atomic_unchecked_t
6389 + *
6390 + * Atomically reads the value of @v.
6391 + */
6392 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6393 +{
6394 + return (*(volatile const int *)&(v)->counter);
6395 }
6396
6397 /**
6398 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6399 }
6400
6401 /**
6402 + * atomic_set_unchecked - set atomic variable
6403 + * @v: pointer of type atomic_unchecked_t
6404 + * @i: required value
6405 + *
6406 + * Atomically sets the value of @v to @i.
6407 + */
6408 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6409 +{
6410 + v->counter = i;
6411 +}
6412 +
6413 +/**
6414 * atomic_add - add integer to atomic variable
6415 * @i: integer value to add
6416 * @v: pointer of type atomic_t
6417 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6418 */
6419 static inline void atomic_add(int i, atomic_t *v)
6420 {
6421 - asm volatile(LOCK_PREFIX "addl %1,%0"
6422 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6423 +
6424 +#ifdef CONFIG_PAX_REFCOUNT
6425 + "jno 0f\n"
6426 + LOCK_PREFIX "subl %1,%0\n"
6427 + "int $4\n0:\n"
6428 + _ASM_EXTABLE(0b, 0b)
6429 +#endif
6430 +
6431 + : "+m" (v->counter)
6432 + : "ir" (i));
6433 +}
6434 +
6435 +/**
6436 + * atomic_add_unchecked - add integer to atomic variable
6437 + * @i: integer value to add
6438 + * @v: pointer of type atomic_unchecked_t
6439 + *
6440 + * Atomically adds @i to @v.
6441 + */
6442 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6443 +{
6444 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6445 : "+m" (v->counter)
6446 : "ir" (i));
6447 }
6448 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6449 */
6450 static inline void atomic_sub(int i, atomic_t *v)
6451 {
6452 - asm volatile(LOCK_PREFIX "subl %1,%0"
6453 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6454 +
6455 +#ifdef CONFIG_PAX_REFCOUNT
6456 + "jno 0f\n"
6457 + LOCK_PREFIX "addl %1,%0\n"
6458 + "int $4\n0:\n"
6459 + _ASM_EXTABLE(0b, 0b)
6460 +#endif
6461 +
6462 + : "+m" (v->counter)
6463 + : "ir" (i));
6464 +}
6465 +
6466 +/**
6467 + * atomic_sub_unchecked - subtract integer from atomic variable
6468 + * @i: integer value to subtract
6469 + * @v: pointer of type atomic_unchecked_t
6470 + *
6471 + * Atomically subtracts @i from @v.
6472 + */
6473 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6474 +{
6475 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6476 : "+m" (v->counter)
6477 : "ir" (i));
6478 }
6479 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6480 {
6481 unsigned char c;
6482
6483 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6484 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6485 +
6486 +#ifdef CONFIG_PAX_REFCOUNT
6487 + "jno 0f\n"
6488 + LOCK_PREFIX "addl %2,%0\n"
6489 + "int $4\n0:\n"
6490 + _ASM_EXTABLE(0b, 0b)
6491 +#endif
6492 +
6493 + "sete %1\n"
6494 : "+m" (v->counter), "=qm" (c)
6495 : "ir" (i) : "memory");
6496 return c;
6497 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6498 */
6499 static inline void atomic_inc(atomic_t *v)
6500 {
6501 - asm volatile(LOCK_PREFIX "incl %0"
6502 + asm volatile(LOCK_PREFIX "incl %0\n"
6503 +
6504 +#ifdef CONFIG_PAX_REFCOUNT
6505 + "jno 0f\n"
6506 + LOCK_PREFIX "decl %0\n"
6507 + "int $4\n0:\n"
6508 + _ASM_EXTABLE(0b, 0b)
6509 +#endif
6510 +
6511 + : "+m" (v->counter));
6512 +}
6513 +
6514 +/**
6515 + * atomic_inc_unchecked - increment atomic variable
6516 + * @v: pointer of type atomic_unchecked_t
6517 + *
6518 + * Atomically increments @v by 1.
6519 + */
6520 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6521 +{
6522 + asm volatile(LOCK_PREFIX "incl %0\n"
6523 : "+m" (v->counter));
6524 }
6525
6526 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6527 */
6528 static inline void atomic_dec(atomic_t *v)
6529 {
6530 - asm volatile(LOCK_PREFIX "decl %0"
6531 + asm volatile(LOCK_PREFIX "decl %0\n"
6532 +
6533 +#ifdef CONFIG_PAX_REFCOUNT
6534 + "jno 0f\n"
6535 + LOCK_PREFIX "incl %0\n"
6536 + "int $4\n0:\n"
6537 + _ASM_EXTABLE(0b, 0b)
6538 +#endif
6539 +
6540 + : "+m" (v->counter));
6541 +}
6542 +
6543 +/**
6544 + * atomic_dec_unchecked - decrement atomic variable
6545 + * @v: pointer of type atomic_unchecked_t
6546 + *
6547 + * Atomically decrements @v by 1.
6548 + */
6549 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6550 +{
6551 + asm volatile(LOCK_PREFIX "decl %0\n"
6552 : "+m" (v->counter));
6553 }
6554
6555 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6556 {
6557 unsigned char c;
6558
6559 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6560 + asm volatile(LOCK_PREFIX "decl %0\n"
6561 +
6562 +#ifdef CONFIG_PAX_REFCOUNT
6563 + "jno 0f\n"
6564 + LOCK_PREFIX "incl %0\n"
6565 + "int $4\n0:\n"
6566 + _ASM_EXTABLE(0b, 0b)
6567 +#endif
6568 +
6569 + "sete %1\n"
6570 : "+m" (v->counter), "=qm" (c)
6571 : : "memory");
6572 return c != 0;
6573 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6574 {
6575 unsigned char c;
6576
6577 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6578 + asm volatile(LOCK_PREFIX "incl %0\n"
6579 +
6580 +#ifdef CONFIG_PAX_REFCOUNT
6581 + "jno 0f\n"
6582 + LOCK_PREFIX "decl %0\n"
6583 + "int $4\n0:\n"
6584 + _ASM_EXTABLE(0b, 0b)
6585 +#endif
6586 +
6587 + "sete %1\n"
6588 + : "+m" (v->counter), "=qm" (c)
6589 + : : "memory");
6590 + return c != 0;
6591 +}
6592 +
6593 +/**
6594 + * atomic_inc_and_test_unchecked - increment and test
6595 + * @v: pointer of type atomic_unchecked_t
6596 + *
6597 + * Atomically increments @v by 1
6598 + * and returns true if the result is zero, or false for all
6599 + * other cases.
6600 + */
6601 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6602 +{
6603 + unsigned char c;
6604 +
6605 + asm volatile(LOCK_PREFIX "incl %0\n"
6606 + "sete %1\n"
6607 : "+m" (v->counter), "=qm" (c)
6608 : : "memory");
6609 return c != 0;
6610 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6611 {
6612 unsigned char c;
6613
6614 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6615 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6616 +
6617 +#ifdef CONFIG_PAX_REFCOUNT
6618 + "jno 0f\n"
6619 + LOCK_PREFIX "subl %2,%0\n"
6620 + "int $4\n0:\n"
6621 + _ASM_EXTABLE(0b, 0b)
6622 +#endif
6623 +
6624 + "sets %1\n"
6625 : "+m" (v->counter), "=qm" (c)
6626 : "ir" (i) : "memory");
6627 return c;
6628 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6629 #endif
6630 /* Modern 486+ processor */
6631 __i = i;
6632 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6633 +
6634 +#ifdef CONFIG_PAX_REFCOUNT
6635 + "jno 0f\n"
6636 + "movl %0, %1\n"
6637 + "int $4\n0:\n"
6638 + _ASM_EXTABLE(0b, 0b)
6639 +#endif
6640 +
6641 + : "+r" (i), "+m" (v->counter)
6642 + : : "memory");
6643 + return i + __i;
6644 +
6645 +#ifdef CONFIG_M386
6646 +no_xadd: /* Legacy 386 processor */
6647 + local_irq_save(flags);
6648 + __i = atomic_read(v);
6649 + atomic_set(v, i + __i);
6650 + local_irq_restore(flags);
6651 + return i + __i;
6652 +#endif
6653 +}
6654 +
6655 +/**
6656 + * atomic_add_return_unchecked - add integer and return
6657 + * @v: pointer of type atomic_unchecked_t
6658 + * @i: integer value to add
6659 + *
6660 + * Atomically adds @i to @v and returns @i + @v
6661 + */
6662 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6663 +{
6664 + int __i;
6665 +#ifdef CONFIG_M386
6666 + unsigned long flags;
6667 + if (unlikely(boot_cpu_data.x86 <= 3))
6668 + goto no_xadd;
6669 +#endif
6670 + /* Modern 486+ processor */
6671 + __i = i;
6672 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6673 : "+r" (i), "+m" (v->counter)
6674 : : "memory");
6675 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6676 }
6677
6678 #define atomic_inc_return(v) (atomic_add_return(1, v))
6679 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6680 +{
6681 + return atomic_add_return_unchecked(1, v);
6682 +}
6683 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6684
6685 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6686 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6687 return cmpxchg(&v->counter, old, new);
6688 }
6689
6690 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6691 +{
6692 + return cmpxchg(&v->counter, old, new);
6693 +}
6694 +
6695 static inline int atomic_xchg(atomic_t *v, int new)
6696 {
6697 return xchg(&v->counter, new);
6698 }
6699
6700 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6701 +{
6702 + return xchg(&v->counter, new);
6703 +}
6704 +
6705 /**
6706 * atomic_add_unless - add unless the number is already a given value
6707 * @v: pointer of type atomic_t
6708 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6709 */
6710 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6711 {
6712 - int c, old;
6713 + int c, old, new;
6714 c = atomic_read(v);
6715 for (;;) {
6716 - if (unlikely(c == (u)))
6717 + if (unlikely(c == u))
6718 break;
6719 - old = atomic_cmpxchg((v), c, c + (a));
6720 +
6721 + asm volatile("addl %2,%0\n"
6722 +
6723 +#ifdef CONFIG_PAX_REFCOUNT
6724 + "jno 0f\n"
6725 + "subl %2,%0\n"
6726 + "int $4\n0:\n"
6727 + _ASM_EXTABLE(0b, 0b)
6728 +#endif
6729 +
6730 + : "=r" (new)
6731 + : "0" (c), "ir" (a));
6732 +
6733 + old = atomic_cmpxchg(v, c, new);
6734 if (likely(old == c))
6735 break;
6736 c = old;
6737 }
6738 - return c != (u);
6739 + return c != u;
6740 }
6741
6742 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6743
6744 +/**
6745 + * atomic_inc_not_zero_hint - increment if not null
6746 + * @v: pointer of type atomic_t
6747 + * @hint: probable value of the atomic before the increment
6748 + *
6749 + * This version of atomic_inc_not_zero() gives a hint of probable
6750 + * value of the atomic. This helps processor to not read the memory
6751 + * before doing the atomic read/modify/write cycle, lowering
6752 + * number of bus transactions on some arches.
6753 + *
6754 + * Returns: 0 if increment was not done, 1 otherwise.
6755 + */
6756 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6757 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6758 +{
6759 + int val, c = hint, new;
6760 +
6761 + /* sanity test, should be removed by compiler if hint is a constant */
6762 + if (!hint)
6763 + return atomic_inc_not_zero(v);
6764 +
6765 + do {
6766 + asm volatile("incl %0\n"
6767 +
6768 +#ifdef CONFIG_PAX_REFCOUNT
6769 + "jno 0f\n"
6770 + "decl %0\n"
6771 + "int $4\n0:\n"
6772 + _ASM_EXTABLE(0b, 0b)
6773 +#endif
6774 +
6775 + : "=r" (new)
6776 + : "0" (c));
6777 +
6778 + val = atomic_cmpxchg(v, c, new);
6779 + if (val == c)
6780 + return 1;
6781 + c = val;
6782 + } while (c);
6783 +
6784 + return 0;
6785 +}
6786 +
6787 /*
6788 * atomic_dec_if_positive - decrement by 1 if old value positive
6789 * @v: pointer of type atomic_t
6790 diff -urNp linux-2.6.39.4/arch/x86/include/asm/bitops.h linux-2.6.39.4/arch/x86/include/asm/bitops.h
6791 --- linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400
6792 +++ linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-08-05 19:44:33.000000000 -0400
6793 @@ -38,7 +38,7 @@
6794 * a mask operation on a byte.
6795 */
6796 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6797 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6798 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6799 #define CONST_MASK(nr) (1 << ((nr) & 7))
6800
6801 /**
6802 diff -urNp linux-2.6.39.4/arch/x86/include/asm/boot.h linux-2.6.39.4/arch/x86/include/asm/boot.h
6803 --- linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400
6804 +++ linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-08-05 19:44:33.000000000 -0400
6805 @@ -11,10 +11,15 @@
6806 #include <asm/pgtable_types.h>
6807
6808 /* Physical address where kernel should be loaded. */
6809 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6810 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6811 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6812 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6813
6814 +#ifndef __ASSEMBLY__
6815 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6816 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6817 +#endif
6818 +
6819 /* Minimum kernel alignment, as a power of two */
6820 #ifdef CONFIG_X86_64
6821 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6822 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cacheflush.h linux-2.6.39.4/arch/x86/include/asm/cacheflush.h
6823 --- linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
6824 +++ linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-08-05 19:44:33.000000000 -0400
6825 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6826 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6827
6828 if (pg_flags == _PGMT_DEFAULT)
6829 - return -1;
6830 + return ~0UL;
6831 else if (pg_flags == _PGMT_WC)
6832 return _PAGE_CACHE_WC;
6833 else if (pg_flags == _PGMT_UC_MINUS)
6834 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cache.h linux-2.6.39.4/arch/x86/include/asm/cache.h
6835 --- linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
6836 +++ linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
6837 @@ -5,12 +5,13 @@
6838
6839 /* L1 cache line size */
6840 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6841 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6842 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6843
6844 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6845 +#define __read_only __attribute__((__section__(".data..read_only")))
6846
6847 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6848 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6849 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6850
6851 #ifdef CONFIG_X86_VSMP
6852 #ifdef CONFIG_SMP
6853 diff -urNp linux-2.6.39.4/arch/x86/include/asm/checksum_32.h linux-2.6.39.4/arch/x86/include/asm/checksum_32.h
6854 --- linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400
6855 +++ linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-08-05 19:44:33.000000000 -0400
6856 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6857 int len, __wsum sum,
6858 int *src_err_ptr, int *dst_err_ptr);
6859
6860 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6861 + int len, __wsum sum,
6862 + int *src_err_ptr, int *dst_err_ptr);
6863 +
6864 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6865 + int len, __wsum sum,
6866 + int *src_err_ptr, int *dst_err_ptr);
6867 +
6868 /*
6869 * Note: when you get a NULL pointer exception here this means someone
6870 * passed in an incorrect kernel address to one of these functions.
6871 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6872 int *err_ptr)
6873 {
6874 might_sleep();
6875 - return csum_partial_copy_generic((__force void *)src, dst,
6876 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6877 len, sum, err_ptr, NULL);
6878 }
6879
6880 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6881 {
6882 might_sleep();
6883 if (access_ok(VERIFY_WRITE, dst, len))
6884 - return csum_partial_copy_generic(src, (__force void *)dst,
6885 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6886 len, sum, NULL, err_ptr);
6887
6888 if (len)
6889 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cpufeature.h linux-2.6.39.4/arch/x86/include/asm/cpufeature.h
6890 --- linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400
6891 +++ linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-08-05 19:44:33.000000000 -0400
6892 @@ -351,7 +351,7 @@ static __always_inline __pure bool __sta
6893 ".section .discard,\"aw\",@progbits\n"
6894 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6895 ".previous\n"
6896 - ".section .altinstr_replacement,\"ax\"\n"
6897 + ".section .altinstr_replacement,\"a\"\n"
6898 "3: movb $1,%0\n"
6899 "4:\n"
6900 ".previous\n"
6901 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc_defs.h linux-2.6.39.4/arch/x86/include/asm/desc_defs.h
6902 --- linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400
6903 +++ linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-08-05 19:44:33.000000000 -0400
6904 @@ -31,6 +31,12 @@ struct desc_struct {
6905 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6906 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6907 };
6908 + struct {
6909 + u16 offset_low;
6910 + u16 seg;
6911 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6912 + unsigned offset_high: 16;
6913 + } gate;
6914 };
6915 } __attribute__((packed));
6916
6917 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc.h linux-2.6.39.4/arch/x86/include/asm/desc.h
6918 --- linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400
6919 +++ linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-08-05 19:44:33.000000000 -0400
6920 @@ -4,6 +4,7 @@
6921 #include <asm/desc_defs.h>
6922 #include <asm/ldt.h>
6923 #include <asm/mmu.h>
6924 +#include <asm/pgtable.h>
6925 #include <linux/smp.h>
6926
6927 static inline void fill_ldt(struct desc_struct *desc,
6928 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
6929 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
6930 desc->type = (info->read_exec_only ^ 1) << 1;
6931 desc->type |= info->contents << 2;
6932 + desc->type |= info->seg_not_present ^ 1;
6933 desc->s = 1;
6934 desc->dpl = 0x3;
6935 desc->p = info->seg_not_present ^ 1;
6936 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
6937 }
6938
6939 extern struct desc_ptr idt_descr;
6940 -extern gate_desc idt_table[];
6941 -
6942 -struct gdt_page {
6943 - struct desc_struct gdt[GDT_ENTRIES];
6944 -} __attribute__((aligned(PAGE_SIZE)));
6945 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6946 +extern gate_desc idt_table[256];
6947
6948 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6949 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6950 {
6951 - return per_cpu(gdt_page, cpu).gdt;
6952 + return cpu_gdt_table[cpu];
6953 }
6954
6955 #ifdef CONFIG_X86_64
6956 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
6957 unsigned long base, unsigned dpl, unsigned flags,
6958 unsigned short seg)
6959 {
6960 - gate->a = (seg << 16) | (base & 0xffff);
6961 - gate->b = (base & 0xffff0000) |
6962 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6963 + gate->gate.offset_low = base;
6964 + gate->gate.seg = seg;
6965 + gate->gate.reserved = 0;
6966 + gate->gate.type = type;
6967 + gate->gate.s = 0;
6968 + gate->gate.dpl = dpl;
6969 + gate->gate.p = 1;
6970 + gate->gate.offset_high = base >> 16;
6971 }
6972
6973 #endif
6974 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
6975 static inline void native_write_idt_entry(gate_desc *idt, int entry,
6976 const gate_desc *gate)
6977 {
6978 + pax_open_kernel();
6979 memcpy(&idt[entry], gate, sizeof(*gate));
6980 + pax_close_kernel();
6981 }
6982
6983 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
6984 const void *desc)
6985 {
6986 + pax_open_kernel();
6987 memcpy(&ldt[entry], desc, 8);
6988 + pax_close_kernel();
6989 }
6990
6991 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
6992 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
6993 size = sizeof(struct desc_struct);
6994 break;
6995 }
6996 +
6997 + pax_open_kernel();
6998 memcpy(&gdt[entry], desc, size);
6999 + pax_close_kernel();
7000 }
7001
7002 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7003 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7004
7005 static inline void native_load_tr_desc(void)
7006 {
7007 + pax_open_kernel();
7008 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7009 + pax_close_kernel();
7010 }
7011
7012 static inline void native_load_gdt(const struct desc_ptr *dtr)
7013 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7014 unsigned int i;
7015 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7016
7017 + pax_open_kernel();
7018 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7019 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7020 + pax_close_kernel();
7021 }
7022
7023 #define _LDT_empty(info) \
7024 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7025 desc->limit = (limit >> 16) & 0xf;
7026 }
7027
7028 -static inline void _set_gate(int gate, unsigned type, void *addr,
7029 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7030 unsigned dpl, unsigned ist, unsigned seg)
7031 {
7032 gate_desc s;
7033 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7034 * Pentium F0 0F bugfix can have resulted in the mapped
7035 * IDT being write-protected.
7036 */
7037 -static inline void set_intr_gate(unsigned int n, void *addr)
7038 +static inline void set_intr_gate(unsigned int n, const void *addr)
7039 {
7040 BUG_ON((unsigned)n > 0xFF);
7041 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7042 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7043 /*
7044 * This routine sets up an interrupt gate at directory privilege level 3.
7045 */
7046 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7047 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7048 {
7049 BUG_ON((unsigned)n > 0xFF);
7050 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7051 }
7052
7053 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7054 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7055 {
7056 BUG_ON((unsigned)n > 0xFF);
7057 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7058 }
7059
7060 -static inline void set_trap_gate(unsigned int n, void *addr)
7061 +static inline void set_trap_gate(unsigned int n, const void *addr)
7062 {
7063 BUG_ON((unsigned)n > 0xFF);
7064 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7065 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
7066 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7067 {
7068 BUG_ON((unsigned)n > 0xFF);
7069 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7070 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7071 }
7072
7073 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7074 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7075 {
7076 BUG_ON((unsigned)n > 0xFF);
7077 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7078 }
7079
7080 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7081 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7082 {
7083 BUG_ON((unsigned)n > 0xFF);
7084 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7085 }
7086
7087 +#ifdef CONFIG_X86_32
7088 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7089 +{
7090 + struct desc_struct d;
7091 +
7092 + if (likely(limit))
7093 + limit = (limit - 1UL) >> PAGE_SHIFT;
7094 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7095 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7096 +}
7097 +#endif
7098 +
7099 #endif /* _ASM_X86_DESC_H */
7100 diff -urNp linux-2.6.39.4/arch/x86/include/asm/e820.h linux-2.6.39.4/arch/x86/include/asm/e820.h
7101 --- linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400
7102 +++ linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-08-05 19:44:33.000000000 -0400
7103 @@ -69,7 +69,7 @@ struct e820map {
7104 #define ISA_START_ADDRESS 0xa0000
7105 #define ISA_END_ADDRESS 0x100000
7106
7107 -#define BIOS_BEGIN 0x000a0000
7108 +#define BIOS_BEGIN 0x000c0000
7109 #define BIOS_END 0x00100000
7110
7111 #define BIOS_ROM_BASE 0xffe00000
7112 diff -urNp linux-2.6.39.4/arch/x86/include/asm/elf.h linux-2.6.39.4/arch/x86/include/asm/elf.h
7113 --- linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
7114 +++ linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
7115 @@ -237,7 +237,25 @@ extern int force_personality32;
7116 the loader. We need to make sure that it is out of the way of the program
7117 that it will "exec", and that there is sufficient room for the brk. */
7118
7119 +#ifdef CONFIG_PAX_SEGMEXEC
7120 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7121 +#else
7122 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7123 +#endif
7124 +
7125 +#ifdef CONFIG_PAX_ASLR
7126 +#ifdef CONFIG_X86_32
7127 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7128 +
7129 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7130 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7131 +#else
7132 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7133 +
7134 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7135 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7136 +#endif
7137 +#endif
7138
7139 /* This yields a mask that user programs can use to figure out what
7140 instruction set this CPU supports. This could be done in user space,
7141 @@ -291,8 +309,7 @@ do { \
7142 #define ARCH_DLINFO \
7143 do { \
7144 if (vdso_enabled) \
7145 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7146 - (unsigned long)current->mm->context.vdso); \
7147 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7148 } while (0)
7149
7150 #define AT_SYSINFO 32
7151 @@ -303,7 +320,7 @@ do { \
7152
7153 #endif /* !CONFIG_X86_32 */
7154
7155 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7156 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7157
7158 #define VDSO_ENTRY \
7159 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7160 @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7161 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7162 #define compat_arch_setup_additional_pages syscall32_setup_pages
7163
7164 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7165 -#define arch_randomize_brk arch_randomize_brk
7166 -
7167 #endif /* _ASM_X86_ELF_H */
7168 diff -urNp linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h
7169 --- linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400
7170 +++ linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-08-05 19:44:33.000000000 -0400
7171 @@ -15,6 +15,6 @@ enum reboot_type {
7172
7173 extern enum reboot_type reboot_type;
7174
7175 -extern void machine_emergency_restart(void);
7176 +extern void machine_emergency_restart(void) __noreturn;
7177
7178 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7179 diff -urNp linux-2.6.39.4/arch/x86/include/asm/futex.h linux-2.6.39.4/arch/x86/include/asm/futex.h
7180 --- linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400
7181 +++ linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-08-05 19:44:33.000000000 -0400
7182 @@ -12,16 +12,18 @@
7183 #include <asm/system.h>
7184
7185 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7186 + typecheck(u32 *, uaddr); \
7187 asm volatile("1:\t" insn "\n" \
7188 "2:\t.section .fixup,\"ax\"\n" \
7189 "3:\tmov\t%3, %1\n" \
7190 "\tjmp\t2b\n" \
7191 "\t.previous\n" \
7192 _ASM_EXTABLE(1b, 3b) \
7193 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7194 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7195 : "i" (-EFAULT), "0" (oparg), "1" (0))
7196
7197 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7198 + typecheck(u32 *, uaddr); \
7199 asm volatile("1:\tmovl %2, %0\n" \
7200 "\tmovl\t%0, %3\n" \
7201 "\t" insn "\n" \
7202 @@ -34,7 +36,7 @@
7203 _ASM_EXTABLE(1b, 4b) \
7204 _ASM_EXTABLE(2b, 4b) \
7205 : "=&a" (oldval), "=&r" (ret), \
7206 - "+m" (*uaddr), "=&r" (tem) \
7207 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7208 : "r" (oparg), "i" (-EFAULT), "1" (0))
7209
7210 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7211 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7212
7213 switch (op) {
7214 case FUTEX_OP_SET:
7215 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7216 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7217 break;
7218 case FUTEX_OP_ADD:
7219 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7220 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7221 uaddr, oparg);
7222 break;
7223 case FUTEX_OP_OR:
7224 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7225 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7226 return -EFAULT;
7227
7228 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7229 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7230 "2:\t.section .fixup, \"ax\"\n"
7231 "3:\tmov %3, %0\n"
7232 "\tjmp 2b\n"
7233 "\t.previous\n"
7234 _ASM_EXTABLE(1b, 3b)
7235 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7236 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7237 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7238 : "memory"
7239 );
7240 diff -urNp linux-2.6.39.4/arch/x86/include/asm/hw_irq.h linux-2.6.39.4/arch/x86/include/asm/hw_irq.h
7241 --- linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400
7242 +++ linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-08-05 19:44:33.000000000 -0400
7243 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7244 extern void enable_IO_APIC(void);
7245
7246 /* Statistics */
7247 -extern atomic_t irq_err_count;
7248 -extern atomic_t irq_mis_count;
7249 +extern atomic_unchecked_t irq_err_count;
7250 +extern atomic_unchecked_t irq_mis_count;
7251
7252 /* EISA */
7253 extern void eisa_set_level_irq(unsigned int irq);
7254 diff -urNp linux-2.6.39.4/arch/x86/include/asm/i387.h linux-2.6.39.4/arch/x86/include/asm/i387.h
7255 --- linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400
7256 +++ linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-08-05 19:44:33.000000000 -0400
7257 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7258 {
7259 int err;
7260
7261 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7262 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7263 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7264 +#endif
7265 +
7266 /* See comment in fxsave() below. */
7267 #ifdef CONFIG_AS_FXSAVEQ
7268 asm volatile("1: fxrstorq %[fx]\n\t"
7269 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7270 {
7271 int err;
7272
7273 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7274 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7275 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7276 +#endif
7277 +
7278 /*
7279 * Clear the bytes not touched by the fxsave and reserved
7280 * for the SW usage.
7281 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7282 #endif /* CONFIG_X86_64 */
7283
7284 /* We need a safe address that is cheap to find and that is already
7285 - in L1 during context switch. The best choices are unfortunately
7286 - different for UP and SMP */
7287 -#ifdef CONFIG_SMP
7288 -#define safe_address (__per_cpu_offset[0])
7289 -#else
7290 -#define safe_address (kstat_cpu(0).cpustat.user)
7291 -#endif
7292 + in L1 during context switch. */
7293 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7294
7295 /*
7296 * These must be called with preempt disabled
7297 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7298 struct thread_info *me = current_thread_info();
7299 preempt_disable();
7300 if (me->status & TS_USEDFPU)
7301 - __save_init_fpu(me->task);
7302 + __save_init_fpu(current);
7303 else
7304 clts();
7305 }
7306 diff -urNp linux-2.6.39.4/arch/x86/include/asm/io.h linux-2.6.39.4/arch/x86/include/asm/io.h
7307 --- linux-2.6.39.4/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400
7308 +++ linux-2.6.39.4/arch/x86/include/asm/io.h 2011-08-05 19:44:33.000000000 -0400
7309 @@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
7310
7311 #include <linux/vmalloc.h>
7312
7313 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7314 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7315 +{
7316 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7317 +}
7318 +
7319 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7320 +{
7321 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7322 +}
7323 +
7324 /*
7325 * Convert a virtual cached pointer to an uncached pointer
7326 */
7327 diff -urNp linux-2.6.39.4/arch/x86/include/asm/irqflags.h linux-2.6.39.4/arch/x86/include/asm/irqflags.h
7328 --- linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400
7329 +++ linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-08-05 19:44:33.000000000 -0400
7330 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7331 sti; \
7332 sysexit
7333
7334 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7335 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7336 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7337 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7338 +
7339 #else
7340 #define INTERRUPT_RETURN iret
7341 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7342 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kprobes.h linux-2.6.39.4/arch/x86/include/asm/kprobes.h
7343 --- linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400
7344 +++ linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-08-05 19:44:33.000000000 -0400
7345 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7346 #define RELATIVEJUMP_SIZE 5
7347 #define RELATIVECALL_OPCODE 0xe8
7348 #define RELATIVE_ADDR_SIZE 4
7349 -#define MAX_STACK_SIZE 64
7350 -#define MIN_STACK_SIZE(ADDR) \
7351 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7352 - THREAD_SIZE - (unsigned long)(ADDR))) \
7353 - ? (MAX_STACK_SIZE) \
7354 - : (((unsigned long)current_thread_info()) + \
7355 - THREAD_SIZE - (unsigned long)(ADDR)))
7356 +#define MAX_STACK_SIZE 64UL
7357 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7358
7359 #define flush_insn_slot(p) do { } while (0)
7360
7361 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kvm_host.h linux-2.6.39.4/arch/x86/include/asm/kvm_host.h
7362 --- linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
7363 +++ linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-08-05 20:34:06.000000000 -0400
7364 @@ -419,7 +419,7 @@ struct kvm_arch {
7365 unsigned int n_used_mmu_pages;
7366 unsigned int n_requested_mmu_pages;
7367 unsigned int n_max_mmu_pages;
7368 - atomic_t invlpg_counter;
7369 + atomic_unchecked_t invlpg_counter;
7370 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7371 /*
7372 * Hash table of struct kvm_mmu_page.
7373 @@ -589,7 +589,7 @@ struct kvm_x86_ops {
7374 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
7375
7376 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
7377 - const struct trace_print_flags *exit_reasons_str;
7378 + const struct trace_print_flags * const exit_reasons_str;
7379 };
7380
7381 struct kvm_arch_async_pf {
7382 diff -urNp linux-2.6.39.4/arch/x86/include/asm/local.h linux-2.6.39.4/arch/x86/include/asm/local.h
7383 --- linux-2.6.39.4/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400
7384 +++ linux-2.6.39.4/arch/x86/include/asm/local.h 2011-08-05 19:44:33.000000000 -0400
7385 @@ -18,26 +18,58 @@ typedef struct {
7386
7387 static inline void local_inc(local_t *l)
7388 {
7389 - asm volatile(_ASM_INC "%0"
7390 + asm volatile(_ASM_INC "%0\n"
7391 +
7392 +#ifdef CONFIG_PAX_REFCOUNT
7393 + "jno 0f\n"
7394 + _ASM_DEC "%0\n"
7395 + "int $4\n0:\n"
7396 + _ASM_EXTABLE(0b, 0b)
7397 +#endif
7398 +
7399 : "+m" (l->a.counter));
7400 }
7401
7402 static inline void local_dec(local_t *l)
7403 {
7404 - asm volatile(_ASM_DEC "%0"
7405 + asm volatile(_ASM_DEC "%0\n"
7406 +
7407 +#ifdef CONFIG_PAX_REFCOUNT
7408 + "jno 0f\n"
7409 + _ASM_INC "%0\n"
7410 + "int $4\n0:\n"
7411 + _ASM_EXTABLE(0b, 0b)
7412 +#endif
7413 +
7414 : "+m" (l->a.counter));
7415 }
7416
7417 static inline void local_add(long i, local_t *l)
7418 {
7419 - asm volatile(_ASM_ADD "%1,%0"
7420 + asm volatile(_ASM_ADD "%1,%0\n"
7421 +
7422 +#ifdef CONFIG_PAX_REFCOUNT
7423 + "jno 0f\n"
7424 + _ASM_SUB "%1,%0\n"
7425 + "int $4\n0:\n"
7426 + _ASM_EXTABLE(0b, 0b)
7427 +#endif
7428 +
7429 : "+m" (l->a.counter)
7430 : "ir" (i));
7431 }
7432
7433 static inline void local_sub(long i, local_t *l)
7434 {
7435 - asm volatile(_ASM_SUB "%1,%0"
7436 + asm volatile(_ASM_SUB "%1,%0\n"
7437 +
7438 +#ifdef CONFIG_PAX_REFCOUNT
7439 + "jno 0f\n"
7440 + _ASM_ADD "%1,%0\n"
7441 + "int $4\n0:\n"
7442 + _ASM_EXTABLE(0b, 0b)
7443 +#endif
7444 +
7445 : "+m" (l->a.counter)
7446 : "ir" (i));
7447 }
7448 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7449 {
7450 unsigned char c;
7451
7452 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7453 + asm volatile(_ASM_SUB "%2,%0\n"
7454 +
7455 +#ifdef CONFIG_PAX_REFCOUNT
7456 + "jno 0f\n"
7457 + _ASM_ADD "%2,%0\n"
7458 + "int $4\n0:\n"
7459 + _ASM_EXTABLE(0b, 0b)
7460 +#endif
7461 +
7462 + "sete %1\n"
7463 : "+m" (l->a.counter), "=qm" (c)
7464 : "ir" (i) : "memory");
7465 return c;
7466 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7467 {
7468 unsigned char c;
7469
7470 - asm volatile(_ASM_DEC "%0; sete %1"
7471 + asm volatile(_ASM_DEC "%0\n"
7472 +
7473 +#ifdef CONFIG_PAX_REFCOUNT
7474 + "jno 0f\n"
7475 + _ASM_INC "%0\n"
7476 + "int $4\n0:\n"
7477 + _ASM_EXTABLE(0b, 0b)
7478 +#endif
7479 +
7480 + "sete %1\n"
7481 : "+m" (l->a.counter), "=qm" (c)
7482 : : "memory");
7483 return c != 0;
7484 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7485 {
7486 unsigned char c;
7487
7488 - asm volatile(_ASM_INC "%0; sete %1"
7489 + asm volatile(_ASM_INC "%0\n"
7490 +
7491 +#ifdef CONFIG_PAX_REFCOUNT
7492 + "jno 0f\n"
7493 + _ASM_DEC "%0\n"
7494 + "int $4\n0:\n"
7495 + _ASM_EXTABLE(0b, 0b)
7496 +#endif
7497 +
7498 + "sete %1\n"
7499 : "+m" (l->a.counter), "=qm" (c)
7500 : : "memory");
7501 return c != 0;
7502 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7503 {
7504 unsigned char c;
7505
7506 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7507 + asm volatile(_ASM_ADD "%2,%0\n"
7508 +
7509 +#ifdef CONFIG_PAX_REFCOUNT
7510 + "jno 0f\n"
7511 + _ASM_SUB "%2,%0\n"
7512 + "int $4\n0:\n"
7513 + _ASM_EXTABLE(0b, 0b)
7514 +#endif
7515 +
7516 + "sets %1\n"
7517 : "+m" (l->a.counter), "=qm" (c)
7518 : "ir" (i) : "memory");
7519 return c;
7520 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7521 #endif
7522 /* Modern 486+ processor */
7523 __i = i;
7524 - asm volatile(_ASM_XADD "%0, %1;"
7525 + asm volatile(_ASM_XADD "%0, %1\n"
7526 +
7527 +#ifdef CONFIG_PAX_REFCOUNT
7528 + "jno 0f\n"
7529 + _ASM_MOV "%0,%1\n"
7530 + "int $4\n0:\n"
7531 + _ASM_EXTABLE(0b, 0b)
7532 +#endif
7533 +
7534 : "+r" (i), "+m" (l->a.counter)
7535 : : "memory");
7536 return i + __i;
7537 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mman.h linux-2.6.39.4/arch/x86/include/asm/mman.h
7538 --- linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400
7539 +++ linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-08-05 19:44:33.000000000 -0400
7540 @@ -5,4 +5,14 @@
7541
7542 #include <asm-generic/mman.h>
7543
7544 +#ifdef __KERNEL__
7545 +#ifndef __ASSEMBLY__
7546 +#ifdef CONFIG_X86_32
7547 +#define arch_mmap_check i386_mmap_check
7548 +int i386_mmap_check(unsigned long addr, unsigned long len,
7549 + unsigned long flags);
7550 +#endif
7551 +#endif
7552 +#endif
7553 +
7554 #endif /* _ASM_X86_MMAN_H */
7555 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu_context.h linux-2.6.39.4/arch/x86/include/asm/mmu_context.h
7556 --- linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400
7557 +++ linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-08-05 19:44:33.000000000 -0400
7558 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7559
7560 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7561 {
7562 +
7563 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7564 + unsigned int i;
7565 + pgd_t *pgd;
7566 +
7567 + pax_open_kernel();
7568 + pgd = get_cpu_pgd(smp_processor_id());
7569 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7570 + if (paravirt_enabled())
7571 + set_pgd(pgd+i, native_make_pgd(0));
7572 + else
7573 + pgd[i] = native_make_pgd(0);
7574 + pax_close_kernel();
7575 +#endif
7576 +
7577 #ifdef CONFIG_SMP
7578 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7579 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7580 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
7581 struct task_struct *tsk)
7582 {
7583 unsigned cpu = smp_processor_id();
7584 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
7585 + int tlbstate = TLBSTATE_OK;
7586 +#endif
7587
7588 if (likely(prev != next)) {
7589 #ifdef CONFIG_SMP
7590 +#ifdef CONFIG_X86_32
7591 + tlbstate = percpu_read(cpu_tlbstate.state);
7592 +#endif
7593 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7594 percpu_write(cpu_tlbstate.active_mm, next);
7595 #endif
7596 cpumask_set_cpu(cpu, mm_cpumask(next));
7597
7598 /* Re-load page tables */
7599 +#ifdef CONFIG_PAX_PER_CPU_PGD
7600 + pax_open_kernel();
7601 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7602 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7603 + pax_close_kernel();
7604 + load_cr3(get_cpu_pgd(cpu));
7605 +#else
7606 load_cr3(next->pgd);
7607 +#endif
7608
7609 /* stop flush ipis for the previous mm */
7610 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7611 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
7612 */
7613 if (unlikely(prev->context.ldt != next->context.ldt))
7614 load_LDT_nolock(&next->context);
7615 - }
7616 +
7617 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7618 + if (!(__supported_pte_mask & _PAGE_NX)) {
7619 + smp_mb__before_clear_bit();
7620 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7621 + smp_mb__after_clear_bit();
7622 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7623 + }
7624 +#endif
7625 +
7626 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7627 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7628 + prev->context.user_cs_limit != next->context.user_cs_limit))
7629 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7630 #ifdef CONFIG_SMP
7631 + else if (unlikely(tlbstate != TLBSTATE_OK))
7632 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7633 +#endif
7634 +#endif
7635 +
7636 + }
7637 else {
7638 +
7639 +#ifdef CONFIG_PAX_PER_CPU_PGD
7640 + pax_open_kernel();
7641 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7642 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7643 + pax_close_kernel();
7644 + load_cr3(get_cpu_pgd(cpu));
7645 +#endif
7646 +
7647 +#ifdef CONFIG_SMP
7648 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7649 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7650
7651 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
7652 * tlb flush IPI delivery. We must reload CR3
7653 * to make sure to use no freed page tables.
7654 */
7655 +
7656 +#ifndef CONFIG_PAX_PER_CPU_PGD
7657 load_cr3(next->pgd);
7658 +#endif
7659 +
7660 load_LDT_nolock(&next->context);
7661 +
7662 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7663 + if (!(__supported_pte_mask & _PAGE_NX))
7664 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7665 +#endif
7666 +
7667 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7668 +#ifdef CONFIG_PAX_PAGEEXEC
7669 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7670 +#endif
7671 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7672 +#endif
7673 +
7674 }
7675 - }
7676 #endif
7677 + }
7678 }
7679
7680 #define activate_mm(prev, next) \
7681 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu.h linux-2.6.39.4/arch/x86/include/asm/mmu.h
7682 --- linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400
7683 +++ linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-08-05 19:44:33.000000000 -0400
7684 @@ -9,10 +9,22 @@
7685 * we put the segment information here.
7686 */
7687 typedef struct {
7688 - void *ldt;
7689 + struct desc_struct *ldt;
7690 int size;
7691 struct mutex lock;
7692 - void *vdso;
7693 + unsigned long vdso;
7694 +
7695 +#ifdef CONFIG_X86_32
7696 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7697 + unsigned long user_cs_base;
7698 + unsigned long user_cs_limit;
7699 +
7700 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7701 + cpumask_t cpu_user_cs_mask;
7702 +#endif
7703 +
7704 +#endif
7705 +#endif
7706
7707 #ifdef CONFIG_X86_64
7708 /* True if mm supports a task running in 32 bit compatibility mode. */
7709 diff -urNp linux-2.6.39.4/arch/x86/include/asm/module.h linux-2.6.39.4/arch/x86/include/asm/module.h
7710 --- linux-2.6.39.4/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400
7711 +++ linux-2.6.39.4/arch/x86/include/asm/module.h 2011-08-05 19:44:33.000000000 -0400
7712 @@ -5,6 +5,7 @@
7713
7714 #ifdef CONFIG_X86_64
7715 /* X86_64 does not define MODULE_PROC_FAMILY */
7716 +#define MODULE_PROC_FAMILY ""
7717 #elif defined CONFIG_M386
7718 #define MODULE_PROC_FAMILY "386 "
7719 #elif defined CONFIG_M486
7720 @@ -59,8 +60,30 @@
7721 #error unknown processor family
7722 #endif
7723
7724 -#ifdef CONFIG_X86_32
7725 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7726 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7727 +#define MODULE_PAX_UDEREF "UDEREF "
7728 +#else
7729 +#define MODULE_PAX_UDEREF ""
7730 +#endif
7731 +
7732 +#ifdef CONFIG_PAX_KERNEXEC
7733 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7734 +#else
7735 +#define MODULE_PAX_KERNEXEC ""
7736 #endif
7737
7738 +#ifdef CONFIG_PAX_REFCOUNT
7739 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7740 +#else
7741 +#define MODULE_PAX_REFCOUNT ""
7742 +#endif
7743 +
7744 +#ifdef CONFIG_GRKERNSEC
7745 +#define MODULE_GRSEC "GRSECURITY "
7746 +#else
7747 +#define MODULE_GRSEC ""
7748 +#endif
7749 +
7750 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7751 +
7752 #endif /* _ASM_X86_MODULE_H */
7753 diff -urNp linux-2.6.39.4/arch/x86/include/asm/page_64_types.h linux-2.6.39.4/arch/x86/include/asm/page_64_types.h
7754 --- linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400
7755 +++ linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-08-05 19:44:33.000000000 -0400
7756 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7757
7758 /* duplicated to the one in bootmem.h */
7759 extern unsigned long max_pfn;
7760 -extern unsigned long phys_base;
7761 +extern const unsigned long phys_base;
7762
7763 extern unsigned long __phys_addr(unsigned long);
7764 #define __phys_reloc_hide(x) (x)
7765 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt.h linux-2.6.39.4/arch/x86/include/asm/paravirt.h
7766 --- linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400
7767 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-08-05 19:44:33.000000000 -0400
7768 @@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned
7769 pv_mmu_ops.set_fixmap(idx, phys, flags);
7770 }
7771
7772 +#ifdef CONFIG_PAX_KERNEXEC
7773 +static inline unsigned long pax_open_kernel(void)
7774 +{
7775 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7776 +}
7777 +
7778 +static inline unsigned long pax_close_kernel(void)
7779 +{
7780 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7781 +}
7782 +#else
7783 +static inline unsigned long pax_open_kernel(void) { return 0; }
7784 +static inline unsigned long pax_close_kernel(void) { return 0; }
7785 +#endif
7786 +
7787 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7788
7789 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7790 @@ -955,7 +970,7 @@ extern void default_banner(void);
7791
7792 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7793 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7794 -#define PARA_INDIRECT(addr) *%cs:addr
7795 +#define PARA_INDIRECT(addr) *%ss:addr
7796 #endif
7797
7798 #define INTERRUPT_RETURN \
7799 @@ -1032,6 +1047,21 @@ extern void default_banner(void);
7800 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7801 CLBR_NONE, \
7802 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7803 +
7804 +#define GET_CR0_INTO_RDI \
7805 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7806 + mov %rax,%rdi
7807 +
7808 +#define SET_RDI_INTO_CR0 \
7809 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7810 +
7811 +#define GET_CR3_INTO_RDI \
7812 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7813 + mov %rax,%rdi
7814 +
7815 +#define SET_RDI_INTO_CR3 \
7816 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7817 +
7818 #endif /* CONFIG_X86_32 */
7819
7820 #endif /* __ASSEMBLY__ */
7821 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h
7822 --- linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400
7823 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:34:06.000000000 -0400
7824 @@ -78,19 +78,19 @@ struct pv_init_ops {
7825 */
7826 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7827 unsigned long addr, unsigned len);
7828 -};
7829 +} __no_const;
7830
7831
7832 struct pv_lazy_ops {
7833 /* Set deferred update mode, used for batching operations. */
7834 void (*enter)(void);
7835 void (*leave)(void);
7836 -};
7837 +} __no_const;
7838
7839 struct pv_time_ops {
7840 unsigned long long (*sched_clock)(void);
7841 unsigned long (*get_tsc_khz)(void);
7842 -};
7843 +} __no_const;
7844
7845 struct pv_cpu_ops {
7846 /* hooks for various privileged instructions */
7847 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7848
7849 void (*start_context_switch)(struct task_struct *prev);
7850 void (*end_context_switch)(struct task_struct *next);
7851 -};
7852 +} __no_const;
7853
7854 struct pv_irq_ops {
7855 /*
7856 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7857 unsigned long start_eip,
7858 unsigned long start_esp);
7859 #endif
7860 -};
7861 +} __no_const;
7862
7863 struct pv_mmu_ops {
7864 unsigned long (*read_cr2)(void);
7865 @@ -317,6 +317,12 @@ struct pv_mmu_ops {
7866 an mfn. We can tell which is which from the index. */
7867 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7868 phys_addr_t phys, pgprot_t flags);
7869 +
7870 +#ifdef CONFIG_PAX_KERNEXEC
7871 + unsigned long (*pax_open_kernel)(void);
7872 + unsigned long (*pax_close_kernel)(void);
7873 +#endif
7874 +
7875 };
7876
7877 struct arch_spinlock;
7878 @@ -327,7 +333,7 @@ struct pv_lock_ops {
7879 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7880 int (*spin_trylock)(struct arch_spinlock *lock);
7881 void (*spin_unlock)(struct arch_spinlock *lock);
7882 -};
7883 +} __no_const;
7884
7885 /* This contains all the paravirt structures: we get a convenient
7886 * number for each function using the offset which we use to indicate
7887 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgalloc.h linux-2.6.39.4/arch/x86/include/asm/pgalloc.h
7888 --- linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400
7889 +++ linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-08-05 19:44:33.000000000 -0400
7890 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7891 pmd_t *pmd, pte_t *pte)
7892 {
7893 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7894 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7895 +}
7896 +
7897 +static inline void pmd_populate_user(struct mm_struct *mm,
7898 + pmd_t *pmd, pte_t *pte)
7899 +{
7900 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7901 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7902 }
7903
7904 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h
7905 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400
7906 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-08-05 19:44:33.000000000 -0400
7907 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7908
7909 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7910 {
7911 + pax_open_kernel();
7912 *pmdp = pmd;
7913 + pax_close_kernel();
7914 }
7915
7916 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7917 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h
7918 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
7919 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
7920 @@ -25,9 +25,6 @@
7921 struct mm_struct;
7922 struct vm_area_struct;
7923
7924 -extern pgd_t swapper_pg_dir[1024];
7925 -extern pgd_t initial_page_table[1024];
7926 -
7927 static inline void pgtable_cache_init(void) { }
7928 static inline void check_pgt_cache(void) { }
7929 void paging_init(void);
7930 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7931 # include <asm/pgtable-2level.h>
7932 #endif
7933
7934 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7935 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7936 +#ifdef CONFIG_X86_PAE
7937 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7938 +#endif
7939 +
7940 #if defined(CONFIG_HIGHPTE)
7941 #define pte_offset_map(dir, address) \
7942 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7943 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7944 /* Clear a kernel PTE and flush it from the TLB */
7945 #define kpte_clear_flush(ptep, vaddr) \
7946 do { \
7947 + pax_open_kernel(); \
7948 pte_clear(&init_mm, (vaddr), (ptep)); \
7949 + pax_close_kernel(); \
7950 __flush_tlb_one((vaddr)); \
7951 } while (0)
7952
7953 @@ -74,6 +79,9 @@ do { \
7954
7955 #endif /* !__ASSEMBLY__ */
7956
7957 +#define HAVE_ARCH_UNMAPPED_AREA
7958 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7959 +
7960 /*
7961 * kern_addr_valid() is (1) for FLATMEM and (0) for
7962 * SPARSEMEM and DISCONTIGMEM
7963 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h
7964 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400
7965 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-05 19:44:33.000000000 -0400
7966 @@ -8,7 +8,7 @@
7967 */
7968 #ifdef CONFIG_X86_PAE
7969 # include <asm/pgtable-3level_types.h>
7970 -# define PMD_SIZE (1UL << PMD_SHIFT)
7971 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
7972 # define PMD_MASK (~(PMD_SIZE - 1))
7973 #else
7974 # include <asm/pgtable-2level_types.h>
7975 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
7976 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
7977 #endif
7978
7979 +#ifdef CONFIG_PAX_KERNEXEC
7980 +#ifndef __ASSEMBLY__
7981 +extern unsigned char MODULES_EXEC_VADDR[];
7982 +extern unsigned char MODULES_EXEC_END[];
7983 +#endif
7984 +#include <asm/boot.h>
7985 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
7986 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
7987 +#else
7988 +#define ktla_ktva(addr) (addr)
7989 +#define ktva_ktla(addr) (addr)
7990 +#endif
7991 +
7992 #define MODULES_VADDR VMALLOC_START
7993 #define MODULES_END VMALLOC_END
7994 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
7995 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h
7996 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400
7997 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-08-05 19:44:33.000000000 -0400
7998 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
7999
8000 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8001 {
8002 + pax_open_kernel();
8003 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8004 + pax_close_kernel();
8005 }
8006
8007 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8008 {
8009 + pax_open_kernel();
8010 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8011 + pax_close_kernel();
8012 }
8013
8014 /*
8015 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h
8016 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400
8017 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-08-05 19:44:33.000000000 -0400
8018 @@ -16,10 +16,13 @@
8019
8020 extern pud_t level3_kernel_pgt[512];
8021 extern pud_t level3_ident_pgt[512];
8022 +extern pud_t level3_vmalloc_pgt[512];
8023 +extern pud_t level3_vmemmap_pgt[512];
8024 +extern pud_t level2_vmemmap_pgt[512];
8025 extern pmd_t level2_kernel_pgt[512];
8026 extern pmd_t level2_fixmap_pgt[512];
8027 -extern pmd_t level2_ident_pgt[512];
8028 -extern pgd_t init_level4_pgt[];
8029 +extern pmd_t level2_ident_pgt[512*2];
8030 +extern pgd_t init_level4_pgt[512];
8031
8032 #define swapper_pg_dir init_level4_pgt
8033
8034 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8035
8036 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8037 {
8038 + pax_open_kernel();
8039 *pmdp = pmd;
8040 + pax_close_kernel();
8041 }
8042
8043 static inline void native_pmd_clear(pmd_t *pmd)
8044 @@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_
8045
8046 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8047 {
8048 + pax_open_kernel();
8049 *pgdp = pgd;
8050 + pax_close_kernel();
8051 }
8052
8053 static inline void native_pgd_clear(pgd_t *pgd)
8054 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h
8055 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400
8056 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-05 19:44:33.000000000 -0400
8057 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8058 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8059 #define MODULES_END _AC(0xffffffffff000000, UL)
8060 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8061 +#define MODULES_EXEC_VADDR MODULES_VADDR
8062 +#define MODULES_EXEC_END MODULES_END
8063 +
8064 +#define ktla_ktva(addr) (addr)
8065 +#define ktva_ktla(addr) (addr)
8066
8067 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8068 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable.h linux-2.6.39.4/arch/x86/include/asm/pgtable.h
8069 --- linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
8070 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
8071 @@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm
8072
8073 #define arch_end_context_switch(prev) do {} while(0)
8074
8075 +#define pax_open_kernel() native_pax_open_kernel()
8076 +#define pax_close_kernel() native_pax_close_kernel()
8077 #endif /* CONFIG_PARAVIRT */
8078
8079 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8080 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8081 +
8082 +#ifdef CONFIG_PAX_KERNEXEC
8083 +static inline unsigned long native_pax_open_kernel(void)
8084 +{
8085 + unsigned long cr0;
8086 +
8087 + preempt_disable();
8088 + barrier();
8089 + cr0 = read_cr0() ^ X86_CR0_WP;
8090 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8091 + write_cr0(cr0);
8092 + return cr0 ^ X86_CR0_WP;
8093 +}
8094 +
8095 +static inline unsigned long native_pax_close_kernel(void)
8096 +{
8097 + unsigned long cr0;
8098 +
8099 + cr0 = read_cr0() ^ X86_CR0_WP;
8100 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8101 + write_cr0(cr0);
8102 + barrier();
8103 + preempt_enable_no_resched();
8104 + return cr0 ^ X86_CR0_WP;
8105 +}
8106 +#else
8107 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8108 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8109 +#endif
8110 +
8111 /*
8112 * The following only work if pte_present() is true.
8113 * Undefined behaviour if not..
8114 */
8115 +static inline int pte_user(pte_t pte)
8116 +{
8117 + return pte_val(pte) & _PAGE_USER;
8118 +}
8119 +
8120 static inline int pte_dirty(pte_t pte)
8121 {
8122 return pte_flags(pte) & _PAGE_DIRTY;
8123 @@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t
8124 return pte_clear_flags(pte, _PAGE_RW);
8125 }
8126
8127 +static inline pte_t pte_mkread(pte_t pte)
8128 +{
8129 + return __pte(pte_val(pte) | _PAGE_USER);
8130 +}
8131 +
8132 static inline pte_t pte_mkexec(pte_t pte)
8133 {
8134 - return pte_clear_flags(pte, _PAGE_NX);
8135 +#ifdef CONFIG_X86_PAE
8136 + if (__supported_pte_mask & _PAGE_NX)
8137 + return pte_clear_flags(pte, _PAGE_NX);
8138 + else
8139 +#endif
8140 + return pte_set_flags(pte, _PAGE_USER);
8141 +}
8142 +
8143 +static inline pte_t pte_exprotect(pte_t pte)
8144 +{
8145 +#ifdef CONFIG_X86_PAE
8146 + if (__supported_pte_mask & _PAGE_NX)
8147 + return pte_set_flags(pte, _PAGE_NX);
8148 + else
8149 +#endif
8150 + return pte_clear_flags(pte, _PAGE_USER);
8151 }
8152
8153 static inline pte_t pte_mkdirty(pte_t pte)
8154 @@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long
8155 #endif
8156
8157 #ifndef __ASSEMBLY__
8158 +
8159 +#ifdef CONFIG_PAX_PER_CPU_PGD
8160 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8161 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8162 +{
8163 + return cpu_pgd[cpu];
8164 +}
8165 +#endif
8166 +
8167 #include <linux/mm_types.h>
8168
8169 static inline int pte_none(pte_t pte)
8170 @@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p
8171
8172 static inline int pgd_bad(pgd_t pgd)
8173 {
8174 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8175 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8176 }
8177
8178 static inline int pgd_none(pgd_t pgd)
8179 @@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd)
8180 * pgd_offset() returns a (pgd_t *)
8181 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8182 */
8183 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8184 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8185 +
8186 +#ifdef CONFIG_PAX_PER_CPU_PGD
8187 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8188 +#endif
8189 +
8190 /*
8191 * a shortcut which implies the use of the kernel's pgd, instead
8192 * of a process's
8193 @@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd)
8194 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8195 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8196
8197 +#ifdef CONFIG_X86_32
8198 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8199 +#else
8200 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8201 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8202 +
8203 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8204 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8205 +#else
8206 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8207 +#endif
8208 +
8209 +#endif
8210 +
8211 #ifndef __ASSEMBLY__
8212
8213 extern int direct_gbpages;
8214 @@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st
8215 * dst and src can be on the same page, but the range must not overlap,
8216 * and must not cross a page boundary.
8217 */
8218 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8219 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8220 {
8221 - memcpy(dst, src, count * sizeof(pgd_t));
8222 + pax_open_kernel();
8223 + while (count--)
8224 + *dst++ = *src++;
8225 + pax_close_kernel();
8226 }
8227
8228 +#ifdef CONFIG_PAX_PER_CPU_PGD
8229 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8230 +#endif
8231 +
8232 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8233 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8234 +#else
8235 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8236 +#endif
8237
8238 #include <asm-generic/pgtable.h>
8239 #endif /* __ASSEMBLY__ */
8240 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h
8241 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400
8242 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-08-05 19:44:33.000000000 -0400
8243 @@ -16,13 +16,12 @@
8244 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8245 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8246 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8247 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8248 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8249 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8250 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8251 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8252 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8253 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8254 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8255 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8256 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8257 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8258
8259 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8260 @@ -40,7 +39,6 @@
8261 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8262 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8263 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8264 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8265 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8266 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8267 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8268 @@ -57,8 +55,10 @@
8269
8270 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8271 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8272 -#else
8273 +#elif defined(CONFIG_KMEMCHECK)
8274 #define _PAGE_NX (_AT(pteval_t, 0))
8275 +#else
8276 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8277 #endif
8278
8279 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8280 @@ -96,6 +96,9 @@
8281 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8282 _PAGE_ACCESSED)
8283
8284 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8285 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8286 +
8287 #define __PAGE_KERNEL_EXEC \
8288 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8289 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8290 @@ -106,8 +109,8 @@
8291 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8292 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8293 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8294 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8295 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8296 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8297 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8298 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8299 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8300 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8301 @@ -166,8 +169,8 @@
8302 * bits are combined, this will alow user to access the high address mapped
8303 * VDSO in the presence of CONFIG_COMPAT_VDSO
8304 */
8305 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8306 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8307 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8308 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8309 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8310 #endif
8311
8312 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8313 {
8314 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8315 }
8316 +#endif
8317
8318 +#if PAGETABLE_LEVELS == 3
8319 +#include <asm-generic/pgtable-nopud.h>
8320 +#endif
8321 +
8322 +#if PAGETABLE_LEVELS == 2
8323 +#include <asm-generic/pgtable-nopmd.h>
8324 +#endif
8325 +
8326 +#ifndef __ASSEMBLY__
8327 #if PAGETABLE_LEVELS > 3
8328 typedef struct { pudval_t pud; } pud_t;
8329
8330 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8331 return pud.pud;
8332 }
8333 #else
8334 -#include <asm-generic/pgtable-nopud.h>
8335 -
8336 static inline pudval_t native_pud_val(pud_t pud)
8337 {
8338 return native_pgd_val(pud.pgd);
8339 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8340 return pmd.pmd;
8341 }
8342 #else
8343 -#include <asm-generic/pgtable-nopmd.h>
8344 -
8345 static inline pmdval_t native_pmd_val(pmd_t pmd)
8346 {
8347 return native_pgd_val(pmd.pud.pgd);
8348 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8349
8350 extern pteval_t __supported_pte_mask;
8351 extern void set_nx(void);
8352 -extern int nx_enabled;
8353
8354 #define pgprot_writecombine pgprot_writecombine
8355 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8356 diff -urNp linux-2.6.39.4/arch/x86/include/asm/processor.h linux-2.6.39.4/arch/x86/include/asm/processor.h
8357 --- linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400
8358 +++ linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-08-05 19:44:33.000000000 -0400
8359 @@ -266,7 +266,7 @@ struct tss_struct {
8360
8361 } ____cacheline_aligned;
8362
8363 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8364 +extern struct tss_struct init_tss[NR_CPUS];
8365
8366 /*
8367 * Save the original ist values for checking stack pointers during debugging
8368 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8369 */
8370 #define TASK_SIZE PAGE_OFFSET
8371 #define TASK_SIZE_MAX TASK_SIZE
8372 +
8373 +#ifdef CONFIG_PAX_SEGMEXEC
8374 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8375 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8376 +#else
8377 #define STACK_TOP TASK_SIZE
8378 -#define STACK_TOP_MAX STACK_TOP
8379 +#endif
8380 +
8381 +#define STACK_TOP_MAX TASK_SIZE
8382
8383 #define INIT_THREAD { \
8384 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8385 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8386 .vm86_info = NULL, \
8387 .sysenter_cs = __KERNEL_CS, \
8388 .io_bitmap_ptr = NULL, \
8389 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8390 */
8391 #define INIT_TSS { \
8392 .x86_tss = { \
8393 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8394 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8395 .ss0 = __KERNEL_DS, \
8396 .ss1 = __KERNEL_CS, \
8397 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8398 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8399 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8400
8401 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8402 -#define KSTK_TOP(info) \
8403 -({ \
8404 - unsigned long *__ptr = (unsigned long *)(info); \
8405 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8406 -})
8407 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8408
8409 /*
8410 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8411 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8412 #define task_pt_regs(task) \
8413 ({ \
8414 struct pt_regs *__regs__; \
8415 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8416 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8417 __regs__ - 1; \
8418 })
8419
8420 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8421 /*
8422 * User space process size. 47bits minus one guard page.
8423 */
8424 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8425 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8426
8427 /* This decides where the kernel will search for a free chunk of vm
8428 * space during mmap's.
8429 */
8430 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8431 - 0xc0000000 : 0xFFFFe000)
8432 + 0xc0000000 : 0xFFFFf000)
8433
8434 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8435 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8436 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8437 #define STACK_TOP_MAX TASK_SIZE_MAX
8438
8439 #define INIT_THREAD { \
8440 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8441 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8442 }
8443
8444 #define INIT_TSS { \
8445 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8446 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8447 }
8448
8449 /*
8450 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8451 */
8452 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8453
8454 +#ifdef CONFIG_PAX_SEGMEXEC
8455 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8456 +#endif
8457 +
8458 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8459
8460 /* Get/set a process' ability to use the timestamp counter instruction */
8461 diff -urNp linux-2.6.39.4/arch/x86/include/asm/ptrace.h linux-2.6.39.4/arch/x86/include/asm/ptrace.h
8462 --- linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400
8463 +++ linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-08-05 19:44:33.000000000 -0400
8464 @@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8465 }
8466
8467 /*
8468 - * user_mode_vm(regs) determines whether a register set came from user mode.
8469 + * user_mode(regs) determines whether a register set came from user mode.
8470 * This is true if V8086 mode was enabled OR if the register set was from
8471 * protected mode with RPL-3 CS value. This tricky test checks that with
8472 * one comparison. Many places in the kernel can bypass this full check
8473 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8474 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8475 + * be used.
8476 */
8477 -static inline int user_mode(struct pt_regs *regs)
8478 +static inline int user_mode_novm(struct pt_regs *regs)
8479 {
8480 #ifdef CONFIG_X86_32
8481 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8482 #else
8483 - return !!(regs->cs & 3);
8484 + return !!(regs->cs & SEGMENT_RPL_MASK);
8485 #endif
8486 }
8487
8488 -static inline int user_mode_vm(struct pt_regs *regs)
8489 +static inline int user_mode(struct pt_regs *regs)
8490 {
8491 #ifdef CONFIG_X86_32
8492 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8493 USER_RPL;
8494 #else
8495 - return user_mode(regs);
8496 + return user_mode_novm(regs);
8497 #endif
8498 }
8499
8500 diff -urNp linux-2.6.39.4/arch/x86/include/asm/reboot.h linux-2.6.39.4/arch/x86/include/asm/reboot.h
8501 --- linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400
8502 +++ linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-08-05 20:34:06.000000000 -0400
8503 @@ -6,19 +6,19 @@
8504 struct pt_regs;
8505
8506 struct machine_ops {
8507 - void (*restart)(char *cmd);
8508 - void (*halt)(void);
8509 - void (*power_off)(void);
8510 + void (* __noreturn restart)(char *cmd);
8511 + void (* __noreturn halt)(void);
8512 + void (* __noreturn power_off)(void);
8513 void (*shutdown)(void);
8514 void (*crash_shutdown)(struct pt_regs *);
8515 - void (*emergency_restart)(void);
8516 -};
8517 + void (* __noreturn emergency_restart)(void);
8518 +} __no_const;
8519
8520 extern struct machine_ops machine_ops;
8521
8522 void native_machine_crash_shutdown(struct pt_regs *regs);
8523 void native_machine_shutdown(void);
8524 -void machine_real_restart(unsigned int type);
8525 +void machine_real_restart(unsigned int type) __noreturn;
8526 /* These must match dispatch_table in reboot_32.S */
8527 #define MRR_BIOS 0
8528 #define MRR_APM 1
8529 diff -urNp linux-2.6.39.4/arch/x86/include/asm/rwsem.h linux-2.6.39.4/arch/x86/include/asm/rwsem.h
8530 --- linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400
8531 +++ linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-08-05 19:44:33.000000000 -0400
8532 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8533 {
8534 asm volatile("# beginning down_read\n\t"
8535 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8536 +
8537 +#ifdef CONFIG_PAX_REFCOUNT
8538 + "jno 0f\n"
8539 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8540 + "int $4\n0:\n"
8541 + _ASM_EXTABLE(0b, 0b)
8542 +#endif
8543 +
8544 /* adds 0x00000001 */
8545 " jns 1f\n"
8546 " call call_rwsem_down_read_failed\n"
8547 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8548 "1:\n\t"
8549 " mov %1,%2\n\t"
8550 " add %3,%2\n\t"
8551 +
8552 +#ifdef CONFIG_PAX_REFCOUNT
8553 + "jno 0f\n"
8554 + "sub %3,%2\n"
8555 + "int $4\n0:\n"
8556 + _ASM_EXTABLE(0b, 0b)
8557 +#endif
8558 +
8559 " jle 2f\n\t"
8560 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8561 " jnz 1b\n\t"
8562 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8563 long tmp;
8564 asm volatile("# beginning down_write\n\t"
8565 LOCK_PREFIX " xadd %1,(%2)\n\t"
8566 +
8567 +#ifdef CONFIG_PAX_REFCOUNT
8568 + "jno 0f\n"
8569 + "mov %1,(%2)\n"
8570 + "int $4\n0:\n"
8571 + _ASM_EXTABLE(0b, 0b)
8572 +#endif
8573 +
8574 /* adds 0xffff0001, returns the old value */
8575 " test %1,%1\n\t"
8576 /* was the count 0 before? */
8577 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8578 long tmp;
8579 asm volatile("# beginning __up_read\n\t"
8580 LOCK_PREFIX " xadd %1,(%2)\n\t"
8581 +
8582 +#ifdef CONFIG_PAX_REFCOUNT
8583 + "jno 0f\n"
8584 + "mov %1,(%2)\n"
8585 + "int $4\n0:\n"
8586 + _ASM_EXTABLE(0b, 0b)
8587 +#endif
8588 +
8589 /* subtracts 1, returns the old value */
8590 " jns 1f\n\t"
8591 " call call_rwsem_wake\n" /* expects old value in %edx */
8592 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8593 long tmp;
8594 asm volatile("# beginning __up_write\n\t"
8595 LOCK_PREFIX " xadd %1,(%2)\n\t"
8596 +
8597 +#ifdef CONFIG_PAX_REFCOUNT
8598 + "jno 0f\n"
8599 + "mov %1,(%2)\n"
8600 + "int $4\n0:\n"
8601 + _ASM_EXTABLE(0b, 0b)
8602 +#endif
8603 +
8604 /* subtracts 0xffff0001, returns the old value */
8605 " jns 1f\n\t"
8606 " call call_rwsem_wake\n" /* expects old value in %edx */
8607 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8608 {
8609 asm volatile("# beginning __downgrade_write\n\t"
8610 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8611 +
8612 +#ifdef CONFIG_PAX_REFCOUNT
8613 + "jno 0f\n"
8614 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8615 + "int $4\n0:\n"
8616 + _ASM_EXTABLE(0b, 0b)
8617 +#endif
8618 +
8619 /*
8620 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8621 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8622 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8623 */
8624 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8625 {
8626 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8627 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8628 +
8629 +#ifdef CONFIG_PAX_REFCOUNT
8630 + "jno 0f\n"
8631 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8632 + "int $4\n0:\n"
8633 + _ASM_EXTABLE(0b, 0b)
8634 +#endif
8635 +
8636 : "+m" (sem->count)
8637 : "er" (delta));
8638 }
8639 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8640 {
8641 long tmp = delta;
8642
8643 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8644 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8645 +
8646 +#ifdef CONFIG_PAX_REFCOUNT
8647 + "jno 0f\n"
8648 + "mov %0,%1\n"
8649 + "int $4\n0:\n"
8650 + _ASM_EXTABLE(0b, 0b)
8651 +#endif
8652 +
8653 : "+r" (tmp), "+m" (sem->count)
8654 : : "memory");
8655
8656 diff -urNp linux-2.6.39.4/arch/x86/include/asm/segment.h linux-2.6.39.4/arch/x86/include/asm/segment.h
8657 --- linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400
8658 +++ linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-08-05 19:44:33.000000000 -0400
8659 @@ -64,8 +64,8 @@
8660 * 26 - ESPFIX small SS
8661 * 27 - per-cpu [ offset to per-cpu data area ]
8662 * 28 - stack_canary-20 [ for stack protector ]
8663 - * 29 - unused
8664 - * 30 - unused
8665 + * 29 - PCI BIOS CS
8666 + * 30 - PCI BIOS DS
8667 * 31 - TSS for double fault handler
8668 */
8669 #define GDT_ENTRY_TLS_MIN 6
8670 @@ -79,6 +79,8 @@
8671
8672 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8673
8674 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8675 +
8676 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8677
8678 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8679 @@ -104,6 +106,12 @@
8680 #define __KERNEL_STACK_CANARY 0
8681 #endif
8682
8683 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8684 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8685 +
8686 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8687 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8688 +
8689 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8690
8691 /*
8692 @@ -141,7 +149,7 @@
8693 */
8694
8695 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8696 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8697 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8698
8699
8700 #else
8701 @@ -165,6 +173,8 @@
8702 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8703 #define __USER32_DS __USER_DS
8704
8705 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8706 +
8707 #define GDT_ENTRY_TSS 8 /* needs two entries */
8708 #define GDT_ENTRY_LDT 10 /* needs two entries */
8709 #define GDT_ENTRY_TLS_MIN 12
8710 @@ -185,6 +195,7 @@
8711 #endif
8712
8713 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8714 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8715 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8716 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8717 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8718 diff -urNp linux-2.6.39.4/arch/x86/include/asm/smp.h linux-2.6.39.4/arch/x86/include/asm/smp.h
8719 --- linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400
8720 +++ linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-08-05 20:34:06.000000000 -0400
8721 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8722 /* cpus sharing the last level cache: */
8723 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8724 DECLARE_PER_CPU(u16, cpu_llc_id);
8725 -DECLARE_PER_CPU(int, cpu_number);
8726 +DECLARE_PER_CPU(unsigned int, cpu_number);
8727
8728 static inline struct cpumask *cpu_sibling_mask(int cpu)
8729 {
8730 @@ -77,7 +77,7 @@ struct smp_ops {
8731
8732 void (*send_call_func_ipi)(const struct cpumask *mask);
8733 void (*send_call_func_single_ipi)(int cpu);
8734 -};
8735 +} __no_const;
8736
8737 /* Globals due to paravirt */
8738 extern void set_cpu_sibling_map(int cpu);
8739 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8740 extern int safe_smp_processor_id(void);
8741
8742 #elif defined(CONFIG_X86_64_SMP)
8743 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8744 -
8745 -#define stack_smp_processor_id() \
8746 -({ \
8747 - struct thread_info *ti; \
8748 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8749 - ti->cpu; \
8750 -})
8751 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8752 +#define stack_smp_processor_id() raw_smp_processor_id()
8753 #define safe_smp_processor_id() smp_processor_id()
8754
8755 #endif
8756 diff -urNp linux-2.6.39.4/arch/x86/include/asm/spinlock.h linux-2.6.39.4/arch/x86/include/asm/spinlock.h
8757 --- linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
8758 +++ linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
8759 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8760 static inline void arch_read_lock(arch_rwlock_t *rw)
8761 {
8762 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8763 +
8764 +#ifdef CONFIG_PAX_REFCOUNT
8765 + "jno 0f\n"
8766 + LOCK_PREFIX " addl $1,(%0)\n"
8767 + "int $4\n0:\n"
8768 + _ASM_EXTABLE(0b, 0b)
8769 +#endif
8770 +
8771 "jns 1f\n"
8772 "call __read_lock_failed\n\t"
8773 "1:\n"
8774 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8775 static inline void arch_write_lock(arch_rwlock_t *rw)
8776 {
8777 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8778 +
8779 +#ifdef CONFIG_PAX_REFCOUNT
8780 + "jno 0f\n"
8781 + LOCK_PREFIX " addl %1,(%0)\n"
8782 + "int $4\n0:\n"
8783 + _ASM_EXTABLE(0b, 0b)
8784 +#endif
8785 +
8786 "jz 1f\n"
8787 "call __write_lock_failed\n\t"
8788 "1:\n"
8789 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8790
8791 static inline void arch_read_unlock(arch_rwlock_t *rw)
8792 {
8793 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8794 + asm volatile(LOCK_PREFIX "incl %0\n"
8795 +
8796 +#ifdef CONFIG_PAX_REFCOUNT
8797 + "jno 0f\n"
8798 + LOCK_PREFIX "decl %0\n"
8799 + "int $4\n0:\n"
8800 + _ASM_EXTABLE(0b, 0b)
8801 +#endif
8802 +
8803 + :"+m" (rw->lock) : : "memory");
8804 }
8805
8806 static inline void arch_write_unlock(arch_rwlock_t *rw)
8807 {
8808 - asm volatile(LOCK_PREFIX "addl %1, %0"
8809 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8810 +
8811 +#ifdef CONFIG_PAX_REFCOUNT
8812 + "jno 0f\n"
8813 + LOCK_PREFIX "subl %1, %0\n"
8814 + "int $4\n0:\n"
8815 + _ASM_EXTABLE(0b, 0b)
8816 +#endif
8817 +
8818 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8819 }
8820
8821 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stackprotector.h linux-2.6.39.4/arch/x86/include/asm/stackprotector.h
8822 --- linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400
8823 +++ linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-08-05 19:44:33.000000000 -0400
8824 @@ -48,7 +48,7 @@
8825 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8826 */
8827 #define GDT_STACK_CANARY_INIT \
8828 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8829 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8830
8831 /*
8832 * Initialize the stackprotector canary value.
8833 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8834
8835 static inline void load_stack_canary_segment(void)
8836 {
8837 -#ifdef CONFIG_X86_32
8838 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8839 asm volatile ("mov %0, %%gs" : : "r" (0));
8840 #endif
8841 }
8842 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stacktrace.h linux-2.6.39.4/arch/x86/include/asm/stacktrace.h
8843 --- linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400
8844 +++ linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-08-05 19:44:33.000000000 -0400
8845 @@ -11,28 +11,20 @@
8846
8847 extern int kstack_depth_to_print;
8848
8849 -struct thread_info;
8850 +struct task_struct;
8851 struct stacktrace_ops;
8852
8853 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8854 - unsigned long *stack,
8855 - unsigned long bp,
8856 - const struct stacktrace_ops *ops,
8857 - void *data,
8858 - unsigned long *end,
8859 - int *graph);
8860 -
8861 -extern unsigned long
8862 -print_context_stack(struct thread_info *tinfo,
8863 - unsigned long *stack, unsigned long bp,
8864 - const struct stacktrace_ops *ops, void *data,
8865 - unsigned long *end, int *graph);
8866 -
8867 -extern unsigned long
8868 -print_context_stack_bp(struct thread_info *tinfo,
8869 - unsigned long *stack, unsigned long bp,
8870 - const struct stacktrace_ops *ops, void *data,
8871 - unsigned long *end, int *graph);
8872 +typedef unsigned long walk_stack_t(struct task_struct *task,
8873 + void *stack_start,
8874 + unsigned long *stack,
8875 + unsigned long bp,
8876 + const struct stacktrace_ops *ops,
8877 + void *data,
8878 + unsigned long *end,
8879 + int *graph);
8880 +
8881 +extern walk_stack_t print_context_stack;
8882 +extern walk_stack_t print_context_stack_bp;
8883
8884 /* Generic stack tracer with callbacks */
8885
8886 @@ -43,7 +35,7 @@ struct stacktrace_ops {
8887 void (*address)(void *data, unsigned long address, int reliable);
8888 /* On negative return stop dumping */
8889 int (*stack)(void *data, char *name);
8890 - walk_stack_t walk_stack;
8891 + walk_stack_t *walk_stack;
8892 };
8893
8894 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8895 diff -urNp linux-2.6.39.4/arch/x86/include/asm/system.h linux-2.6.39.4/arch/x86/include/asm/system.h
8896 --- linux-2.6.39.4/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
8897 +++ linux-2.6.39.4/arch/x86/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
8898 @@ -129,7 +129,7 @@ do { \
8899 "call __switch_to\n\t" \
8900 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8901 __switch_canary \
8902 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8903 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8904 "movq %%rax,%%rdi\n\t" \
8905 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8906 "jnz ret_from_fork\n\t" \
8907 @@ -140,7 +140,7 @@ do { \
8908 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8909 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8910 [_tif_fork] "i" (_TIF_FORK), \
8911 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8912 + [thread_info] "m" (current_tinfo), \
8913 [current_task] "m" (current_task) \
8914 __switch_canary_iparam \
8915 : "memory", "cc" __EXTRA_CLOBBER)
8916 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8917 {
8918 unsigned long __limit;
8919 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8920 - return __limit + 1;
8921 + return __limit;
8922 }
8923
8924 static inline void native_clts(void)
8925 @@ -340,12 +340,12 @@ void enable_hlt(void);
8926
8927 void cpu_idle_wait(void);
8928
8929 -extern unsigned long arch_align_stack(unsigned long sp);
8930 +#define arch_align_stack(x) ((x) & ~0xfUL)
8931 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8932
8933 void default_idle(void);
8934
8935 -void stop_this_cpu(void *dummy);
8936 +void stop_this_cpu(void *dummy) __noreturn;
8937
8938 /*
8939 * Force strict CPU ordering.
8940 diff -urNp linux-2.6.39.4/arch/x86/include/asm/thread_info.h linux-2.6.39.4/arch/x86/include/asm/thread_info.h
8941 --- linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400
8942 +++ linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-08-05 19:44:33.000000000 -0400
8943 @@ -10,6 +10,7 @@
8944 #include <linux/compiler.h>
8945 #include <asm/page.h>
8946 #include <asm/types.h>
8947 +#include <asm/percpu.h>
8948
8949 /*
8950 * low level task data that entry.S needs immediate access to
8951 @@ -24,7 +25,6 @@ struct exec_domain;
8952 #include <asm/atomic.h>
8953
8954 struct thread_info {
8955 - struct task_struct *task; /* main task structure */
8956 struct exec_domain *exec_domain; /* execution domain */
8957 __u32 flags; /* low level flags */
8958 __u32 status; /* thread synchronous flags */
8959 @@ -34,18 +34,12 @@ struct thread_info {
8960 mm_segment_t addr_limit;
8961 struct restart_block restart_block;
8962 void __user *sysenter_return;
8963 -#ifdef CONFIG_X86_32
8964 - unsigned long previous_esp; /* ESP of the previous stack in
8965 - case of nested (IRQ) stacks
8966 - */
8967 - __u8 supervisor_stack[0];
8968 -#endif
8969 + unsigned long lowest_stack;
8970 int uaccess_err;
8971 };
8972
8973 -#define INIT_THREAD_INFO(tsk) \
8974 +#define INIT_THREAD_INFO \
8975 { \
8976 - .task = &tsk, \
8977 .exec_domain = &default_exec_domain, \
8978 .flags = 0, \
8979 .cpu = 0, \
8980 @@ -56,7 +50,7 @@ struct thread_info {
8981 }, \
8982 }
8983
8984 -#define init_thread_info (init_thread_union.thread_info)
8985 +#define init_thread_info (init_thread_union.stack)
8986 #define init_stack (init_thread_union.stack)
8987
8988 #else /* !__ASSEMBLY__ */
8989 @@ -170,6 +164,23 @@ struct thread_info {
8990 ret; \
8991 })
8992
8993 +#ifdef __ASSEMBLY__
8994 +/* how to get the thread information struct from ASM */
8995 +#define GET_THREAD_INFO(reg) \
8996 + mov PER_CPU_VAR(current_tinfo), reg
8997 +
8998 +/* use this one if reg already contains %esp */
8999 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9000 +#else
9001 +/* how to get the thread information struct from C */
9002 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9003 +
9004 +static __always_inline struct thread_info *current_thread_info(void)
9005 +{
9006 + return percpu_read_stable(current_tinfo);
9007 +}
9008 +#endif
9009 +
9010 #ifdef CONFIG_X86_32
9011
9012 #define STACK_WARN (THREAD_SIZE/8)
9013 @@ -180,35 +191,13 @@ struct thread_info {
9014 */
9015 #ifndef __ASSEMBLY__
9016
9017 -
9018 /* how to get the current stack pointer from C */
9019 register unsigned long current_stack_pointer asm("esp") __used;
9020
9021 -/* how to get the thread information struct from C */
9022 -static inline struct thread_info *current_thread_info(void)
9023 -{
9024 - return (struct thread_info *)
9025 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9026 -}
9027 -
9028 -#else /* !__ASSEMBLY__ */
9029 -
9030 -/* how to get the thread information struct from ASM */
9031 -#define GET_THREAD_INFO(reg) \
9032 - movl $-THREAD_SIZE, reg; \
9033 - andl %esp, reg
9034 -
9035 -/* use this one if reg already contains %esp */
9036 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9037 - andl $-THREAD_SIZE, reg
9038 -
9039 #endif
9040
9041 #else /* X86_32 */
9042
9043 -#include <asm/percpu.h>
9044 -#define KERNEL_STACK_OFFSET (5*8)
9045 -
9046 /*
9047 * macros/functions for gaining access to the thread information structure
9048 * preempt_count needs to be 1 initially, until the scheduler is functional.
9049 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9050 #ifndef __ASSEMBLY__
9051 DECLARE_PER_CPU(unsigned long, kernel_stack);
9052
9053 -static inline struct thread_info *current_thread_info(void)
9054 -{
9055 - struct thread_info *ti;
9056 - ti = (void *)(percpu_read_stable(kernel_stack) +
9057 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9058 - return ti;
9059 -}
9060 -
9061 -#else /* !__ASSEMBLY__ */
9062 -
9063 -/* how to get the thread information struct from ASM */
9064 -#define GET_THREAD_INFO(reg) \
9065 - movq PER_CPU_VAR(kernel_stack),reg ; \
9066 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9067 -
9068 +/* how to get the current stack pointer from C */
9069 +register unsigned long current_stack_pointer asm("rsp") __used;
9070 #endif
9071
9072 #endif /* !X86_32 */
9073 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9074 extern void free_thread_info(struct thread_info *ti);
9075 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9076 #define arch_task_cache_init arch_task_cache_init
9077 +
9078 +#define __HAVE_THREAD_FUNCTIONS
9079 +#define task_thread_info(task) (&(task)->tinfo)
9080 +#define task_stack_page(task) ((task)->stack)
9081 +#define setup_thread_stack(p, org) do {} while (0)
9082 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9083 +
9084 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9085 +extern struct task_struct *alloc_task_struct_node(int node);
9086 +extern void free_task_struct(struct task_struct *);
9087 +
9088 #endif
9089 #endif /* _ASM_X86_THREAD_INFO_H */
9090 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h
9091 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
9092 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
9093 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
9094 static __always_inline unsigned long __must_check
9095 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9096 {
9097 + pax_track_stack();
9098 +
9099 + if ((long)n < 0)
9100 + return n;
9101 +
9102 if (__builtin_constant_p(n)) {
9103 unsigned long ret;
9104
9105 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
9106 return ret;
9107 }
9108 }
9109 + if (!__builtin_constant_p(n))
9110 + check_object_size(from, n, true);
9111 return __copy_to_user_ll(to, from, n);
9112 }
9113
9114 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
9115 __copy_to_user(void __user *to, const void *from, unsigned long n)
9116 {
9117 might_fault();
9118 +
9119 return __copy_to_user_inatomic(to, from, n);
9120 }
9121
9122 static __always_inline unsigned long
9123 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9124 {
9125 + if ((long)n < 0)
9126 + return n;
9127 +
9128 /* Avoid zeroing the tail if the copy fails..
9129 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9130 * but as the zeroing behaviour is only significant when n is not
9131 @@ -138,6 +149,12 @@ static __always_inline unsigned long
9132 __copy_from_user(void *to, const void __user *from, unsigned long n)
9133 {
9134 might_fault();
9135 +
9136 + pax_track_stack();
9137 +
9138 + if ((long)n < 0)
9139 + return n;
9140 +
9141 if (__builtin_constant_p(n)) {
9142 unsigned long ret;
9143
9144 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
9145 return ret;
9146 }
9147 }
9148 + if (!__builtin_constant_p(n))
9149 + check_object_size(to, n, false);
9150 return __copy_from_user_ll(to, from, n);
9151 }
9152
9153 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
9154 const void __user *from, unsigned long n)
9155 {
9156 might_fault();
9157 +
9158 + if ((long)n < 0)
9159 + return n;
9160 +
9161 if (__builtin_constant_p(n)) {
9162 unsigned long ret;
9163
9164 @@ -182,15 +205,19 @@ static __always_inline unsigned long
9165 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9166 unsigned long n)
9167 {
9168 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9169 -}
9170 + if ((long)n < 0)
9171 + return n;
9172
9173 -unsigned long __must_check copy_to_user(void __user *to,
9174 - const void *from, unsigned long n);
9175 -unsigned long __must_check _copy_from_user(void *to,
9176 - const void __user *from,
9177 - unsigned long n);
9178 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9179 +}
9180
9181 +extern void copy_to_user_overflow(void)
9182 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9183 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9184 +#else
9185 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9186 +#endif
9187 +;
9188
9189 extern void copy_from_user_overflow(void)
9190 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9191 @@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void
9192 #endif
9193 ;
9194
9195 -static inline unsigned long __must_check copy_from_user(void *to,
9196 - const void __user *from,
9197 - unsigned long n)
9198 +/**
9199 + * copy_to_user: - Copy a block of data into user space.
9200 + * @to: Destination address, in user space.
9201 + * @from: Source address, in kernel space.
9202 + * @n: Number of bytes to copy.
9203 + *
9204 + * Context: User context only. This function may sleep.
9205 + *
9206 + * Copy data from kernel space to user space.
9207 + *
9208 + * Returns number of bytes that could not be copied.
9209 + * On success, this will be zero.
9210 + */
9211 +static inline unsigned long __must_check
9212 +copy_to_user(void __user *to, const void *from, unsigned long n)
9213 +{
9214 + int sz = __compiletime_object_size(from);
9215 +
9216 + if (unlikely(sz != -1 && sz < n))
9217 + copy_to_user_overflow();
9218 + else if (access_ok(VERIFY_WRITE, to, n))
9219 + n = __copy_to_user(to, from, n);
9220 + return n;
9221 +}
9222 +
9223 +/**
9224 + * copy_from_user: - Copy a block of data from user space.
9225 + * @to: Destination address, in kernel space.
9226 + * @from: Source address, in user space.
9227 + * @n: Number of bytes to copy.
9228 + *
9229 + * Context: User context only. This function may sleep.
9230 + *
9231 + * Copy data from user space to kernel space.
9232 + *
9233 + * Returns number of bytes that could not be copied.
9234 + * On success, this will be zero.
9235 + *
9236 + * If some data could not be copied, this function will pad the copied
9237 + * data to the requested size using zero bytes.
9238 + */
9239 +static inline unsigned long __must_check
9240 +copy_from_user(void *to, const void __user *from, unsigned long n)
9241 {
9242 int sz = __compiletime_object_size(to);
9243
9244 - if (likely(sz == -1 || sz >= n))
9245 - n = _copy_from_user(to, from, n);
9246 - else
9247 + if (unlikely(sz != -1 && sz < n))
9248 copy_from_user_overflow();
9249 -
9250 + else if (access_ok(VERIFY_READ, from, n))
9251 + n = __copy_from_user(to, from, n);
9252 + else if ((long)n > 0) {
9253 + if (!__builtin_constant_p(n))
9254 + check_object_size(to, n, false);
9255 + memset(to, 0, n);
9256 + }
9257 return n;
9258 }
9259
9260 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h
9261 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
9262 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
9263 @@ -11,6 +11,9 @@
9264 #include <asm/alternative.h>
9265 #include <asm/cpufeature.h>
9266 #include <asm/page.h>
9267 +#include <asm/pgtable.h>
9268 +
9269 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9270
9271 /*
9272 * Copy To/From Userspace
9273 @@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
9274 return ret;
9275 }
9276
9277 -__must_check unsigned long
9278 -_copy_to_user(void __user *to, const void *from, unsigned len);
9279 -__must_check unsigned long
9280 -_copy_from_user(void *to, const void __user *from, unsigned len);
9281 +static __always_inline __must_check unsigned long
9282 +__copy_to_user(void __user *to, const void *from, unsigned len);
9283 +static __always_inline __must_check unsigned long
9284 +__copy_from_user(void *to, const void __user *from, unsigned len);
9285 __must_check unsigned long
9286 copy_in_user(void __user *to, const void __user *from, unsigned len);
9287
9288 static inline unsigned long __must_check copy_from_user(void *to,
9289 const void __user *from,
9290 - unsigned long n)
9291 + unsigned n)
9292 {
9293 - int sz = __compiletime_object_size(to);
9294 -
9295 might_fault();
9296 - if (likely(sz == -1 || sz >= n))
9297 - n = _copy_from_user(to, from, n);
9298 -#ifdef CONFIG_DEBUG_VM
9299 - else
9300 - WARN(1, "Buffer overflow detected!\n");
9301 -#endif
9302 +
9303 + if (access_ok(VERIFY_READ, from, n))
9304 + n = __copy_from_user(to, from, n);
9305 + else if ((int)n > 0) {
9306 + if (!__builtin_constant_p(n))
9307 + check_object_size(to, n, false);
9308 + memset(to, 0, n);
9309 + }
9310 return n;
9311 }
9312
9313 @@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const
9314 {
9315 might_fault();
9316
9317 - return _copy_to_user(dst, src, size);
9318 + if (access_ok(VERIFY_WRITE, dst, size))
9319 + size = __copy_to_user(dst, src, size);
9320 + return size;
9321 }
9322
9323 static __always_inline __must_check
9324 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9325 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9326 {
9327 - int ret = 0;
9328 + int sz = __compiletime_object_size(dst);
9329 + unsigned ret = 0;
9330
9331 might_fault();
9332 - if (!__builtin_constant_p(size))
9333 - return copy_user_generic(dst, (__force void *)src, size);
9334 +
9335 + pax_track_stack();
9336 +
9337 + if ((int)size < 0)
9338 + return size;
9339 +
9340 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9341 + if (!__access_ok(VERIFY_READ, src, size))
9342 + return size;
9343 +#endif
9344 +
9345 + if (unlikely(sz != -1 && sz < size)) {
9346 +#ifdef CONFIG_DEBUG_VM
9347 + WARN(1, "Buffer overflow detected!\n");
9348 +#endif
9349 + return size;
9350 + }
9351 +
9352 + if (!__builtin_constant_p(size)) {
9353 + check_object_size(dst, size, false);
9354 +
9355 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9356 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9357 + src += PAX_USER_SHADOW_BASE;
9358 +#endif
9359 +
9360 + return copy_user_generic(dst, (__force const void *)src, size);
9361 + }
9362 switch (size) {
9363 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9364 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9365 ret, "b", "b", "=q", 1);
9366 return ret;
9367 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9368 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9369 ret, "w", "w", "=r", 2);
9370 return ret;
9371 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9372 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9373 ret, "l", "k", "=r", 4);
9374 return ret;
9375 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9376 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9377 ret, "q", "", "=r", 8);
9378 return ret;
9379 case 10:
9380 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9381 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9382 ret, "q", "", "=r", 10);
9383 if (unlikely(ret))
9384 return ret;
9385 __get_user_asm(*(u16 *)(8 + (char *)dst),
9386 - (u16 __user *)(8 + (char __user *)src),
9387 + (const u16 __user *)(8 + (const char __user *)src),
9388 ret, "w", "w", "=r", 2);
9389 return ret;
9390 case 16:
9391 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9392 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9393 ret, "q", "", "=r", 16);
9394 if (unlikely(ret))
9395 return ret;
9396 __get_user_asm(*(u64 *)(8 + (char *)dst),
9397 - (u64 __user *)(8 + (char __user *)src),
9398 + (const u64 __user *)(8 + (const char __user *)src),
9399 ret, "q", "", "=r", 8);
9400 return ret;
9401 default:
9402 - return copy_user_generic(dst, (__force void *)src, size);
9403 +
9404 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9405 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9406 + src += PAX_USER_SHADOW_BASE;
9407 +#endif
9408 +
9409 + return copy_user_generic(dst, (__force const void *)src, size);
9410 }
9411 }
9412
9413 static __always_inline __must_check
9414 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9415 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9416 {
9417 - int ret = 0;
9418 + int sz = __compiletime_object_size(src);
9419 + unsigned ret = 0;
9420
9421 might_fault();
9422 - if (!__builtin_constant_p(size))
9423 +
9424 + pax_track_stack();
9425 +
9426 + if ((int)size < 0)
9427 + return size;
9428 +
9429 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9430 + if (!__access_ok(VERIFY_WRITE, dst, size))
9431 + return size;
9432 +#endif
9433 +
9434 + if (unlikely(sz != -1 && sz < size)) {
9435 +#ifdef CONFIG_DEBUG_VM
9436 + WARN(1, "Buffer overflow detected!\n");
9437 +#endif
9438 + return size;
9439 + }
9440 +
9441 + if (!__builtin_constant_p(size)) {
9442 + check_object_size(src, size, true);
9443 +
9444 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9445 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9446 + dst += PAX_USER_SHADOW_BASE;
9447 +#endif
9448 +
9449 return copy_user_generic((__force void *)dst, src, size);
9450 + }
9451 switch (size) {
9452 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9453 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9454 ret, "b", "b", "iq", 1);
9455 return ret;
9456 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9457 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9458 ret, "w", "w", "ir", 2);
9459 return ret;
9460 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9461 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9462 ret, "l", "k", "ir", 4);
9463 return ret;
9464 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9465 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9466 ret, "q", "", "er", 8);
9467 return ret;
9468 case 10:
9469 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9470 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9471 ret, "q", "", "er", 10);
9472 if (unlikely(ret))
9473 return ret;
9474 asm("":::"memory");
9475 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9476 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9477 ret, "w", "w", "ir", 2);
9478 return ret;
9479 case 16:
9480 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9481 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9482 ret, "q", "", "er", 16);
9483 if (unlikely(ret))
9484 return ret;
9485 asm("":::"memory");
9486 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9487 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9488 ret, "q", "", "er", 8);
9489 return ret;
9490 default:
9491 +
9492 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9493 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9494 + dst += PAX_USER_SHADOW_BASE;
9495 +#endif
9496 +
9497 return copy_user_generic((__force void *)dst, src, size);
9498 }
9499 }
9500
9501 static __always_inline __must_check
9502 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9503 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9504 {
9505 - int ret = 0;
9506 + unsigned ret = 0;
9507
9508 might_fault();
9509 - if (!__builtin_constant_p(size))
9510 +
9511 + if ((int)size < 0)
9512 + return size;
9513 +
9514 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9515 + if (!__access_ok(VERIFY_READ, src, size))
9516 + return size;
9517 + if (!__access_ok(VERIFY_WRITE, dst, size))
9518 + return size;
9519 +#endif
9520 +
9521 + if (!__builtin_constant_p(size)) {
9522 +
9523 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9524 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9525 + src += PAX_USER_SHADOW_BASE;
9526 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9527 + dst += PAX_USER_SHADOW_BASE;
9528 +#endif
9529 +
9530 return copy_user_generic((__force void *)dst,
9531 - (__force void *)src, size);
9532 + (__force const void *)src, size);
9533 + }
9534 switch (size) {
9535 case 1: {
9536 u8 tmp;
9537 - __get_user_asm(tmp, (u8 __user *)src,
9538 + __get_user_asm(tmp, (const u8 __user *)src,
9539 ret, "b", "b", "=q", 1);
9540 if (likely(!ret))
9541 __put_user_asm(tmp, (u8 __user *)dst,
9542 @@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con
9543 }
9544 case 2: {
9545 u16 tmp;
9546 - __get_user_asm(tmp, (u16 __user *)src,
9547 + __get_user_asm(tmp, (const u16 __user *)src,
9548 ret, "w", "w", "=r", 2);
9549 if (likely(!ret))
9550 __put_user_asm(tmp, (u16 __user *)dst,
9551 @@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con
9552
9553 case 4: {
9554 u32 tmp;
9555 - __get_user_asm(tmp, (u32 __user *)src,
9556 + __get_user_asm(tmp, (const u32 __user *)src,
9557 ret, "l", "k", "=r", 4);
9558 if (likely(!ret))
9559 __put_user_asm(tmp, (u32 __user *)dst,
9560 @@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con
9561 }
9562 case 8: {
9563 u64 tmp;
9564 - __get_user_asm(tmp, (u64 __user *)src,
9565 + __get_user_asm(tmp, (const u64 __user *)src,
9566 ret, "q", "", "=r", 8);
9567 if (likely(!ret))
9568 __put_user_asm(tmp, (u64 __user *)dst,
9569 @@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con
9570 return ret;
9571 }
9572 default:
9573 +
9574 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9575 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9576 + src += PAX_USER_SHADOW_BASE;
9577 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9578 + dst += PAX_USER_SHADOW_BASE;
9579 +#endif
9580 +
9581 return copy_user_generic((__force void *)dst,
9582 - (__force void *)src, size);
9583 + (__force const void *)src, size);
9584 }
9585 }
9586
9587 @@ -222,33 +321,72 @@ __must_check unsigned long __clear_user(
9588 static __must_check __always_inline int
9589 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9590 {
9591 + pax_track_stack();
9592 +
9593 + if ((int)size < 0)
9594 + return size;
9595 +
9596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9597 + if (!__access_ok(VERIFY_READ, src, size))
9598 + return size;
9599 +
9600 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9601 + src += PAX_USER_SHADOW_BASE;
9602 +#endif
9603 +
9604 return copy_user_generic(dst, (__force const void *)src, size);
9605 }
9606
9607 -static __must_check __always_inline int
9608 +static __must_check __always_inline unsigned long
9609 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9610 {
9611 + if ((int)size < 0)
9612 + return size;
9613 +
9614 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9615 + if (!__access_ok(VERIFY_WRITE, dst, size))
9616 + return size;
9617 +
9618 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9619 + dst += PAX_USER_SHADOW_BASE;
9620 +#endif
9621 +
9622 return copy_user_generic((__force void *)dst, src, size);
9623 }
9624
9625 -extern long __copy_user_nocache(void *dst, const void __user *src,
9626 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9627 unsigned size, int zerorest);
9628
9629 -static inline int
9630 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9631 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9632 {
9633 might_sleep();
9634 +
9635 + if ((int)size < 0)
9636 + return size;
9637 +
9638 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9639 + if (!__access_ok(VERIFY_READ, src, size))
9640 + return size;
9641 +#endif
9642 +
9643 return __copy_user_nocache(dst, src, size, 1);
9644 }
9645
9646 -static inline int
9647 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9648 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9649 unsigned size)
9650 {
9651 + if ((int)size < 0)
9652 + return size;
9653 +
9654 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9655 + if (!__access_ok(VERIFY_READ, src, size))
9656 + return size;
9657 +#endif
9658 +
9659 return __copy_user_nocache(dst, src, size, 0);
9660 }
9661
9662 -unsigned long
9663 +extern unsigned long
9664 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9665
9666 #endif /* _ASM_X86_UACCESS_64_H */
9667 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess.h linux-2.6.39.4/arch/x86/include/asm/uaccess.h
9668 --- linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400
9669 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
9670 @@ -8,12 +8,15 @@
9671 #include <linux/thread_info.h>
9672 #include <linux/prefetch.h>
9673 #include <linux/string.h>
9674 +#include <linux/sched.h>
9675 #include <asm/asm.h>
9676 #include <asm/page.h>
9677
9678 #define VERIFY_READ 0
9679 #define VERIFY_WRITE 1
9680
9681 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9682 +
9683 /*
9684 * The fs value determines whether argument validity checking should be
9685 * performed or not. If get_fs() == USER_DS, checking is performed, with
9686 @@ -29,7 +32,12 @@
9687
9688 #define get_ds() (KERNEL_DS)
9689 #define get_fs() (current_thread_info()->addr_limit)
9690 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9691 +void __set_fs(mm_segment_t x);
9692 +void set_fs(mm_segment_t x);
9693 +#else
9694 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9695 +#endif
9696
9697 #define segment_eq(a, b) ((a).seg == (b).seg)
9698
9699 @@ -77,7 +85,33 @@
9700 * checks that the pointer is in the user space range - after calling
9701 * this function, memory access functions may still return -EFAULT.
9702 */
9703 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9704 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9705 +#define access_ok(type, addr, size) \
9706 +({ \
9707 + long __size = size; \
9708 + unsigned long __addr = (unsigned long)addr; \
9709 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9710 + unsigned long __end_ao = __addr + __size - 1; \
9711 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9712 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9713 + while(__addr_ao <= __end_ao) { \
9714 + char __c_ao; \
9715 + __addr_ao += PAGE_SIZE; \
9716 + if (__size > PAGE_SIZE) \
9717 + cond_resched(); \
9718 + if (__get_user(__c_ao, (char __user *)__addr)) \
9719 + break; \
9720 + if (type != VERIFY_WRITE) { \
9721 + __addr = __addr_ao; \
9722 + continue; \
9723 + } \
9724 + if (__put_user(__c_ao, (char __user *)__addr)) \
9725 + break; \
9726 + __addr = __addr_ao; \
9727 + } \
9728 + } \
9729 + __ret_ao; \
9730 +})
9731
9732 /*
9733 * The exception table consists of pairs of addresses: the first is the
9734 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
9735 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9736 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9737
9738 -
9739 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9740 +#define __copyuser_seg "gs;"
9741 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9742 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9743 +#else
9744 +#define __copyuser_seg
9745 +#define __COPYUSER_SET_ES
9746 +#define __COPYUSER_RESTORE_ES
9747 +#endif
9748
9749 #ifdef CONFIG_X86_32
9750 #define __put_user_asm_u64(x, addr, err, errret) \
9751 - asm volatile("1: movl %%eax,0(%2)\n" \
9752 - "2: movl %%edx,4(%2)\n" \
9753 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9754 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9755 "3:\n" \
9756 ".section .fixup,\"ax\"\n" \
9757 "4: movl %3,%0\n" \
9758 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
9759 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9760
9761 #define __put_user_asm_ex_u64(x, addr) \
9762 - asm volatile("1: movl %%eax,0(%1)\n" \
9763 - "2: movl %%edx,4(%1)\n" \
9764 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9765 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9766 "3:\n" \
9767 _ASM_EXTABLE(1b, 2b - 1b) \
9768 _ASM_EXTABLE(2b, 3b - 2b) \
9769 @@ -374,7 +416,7 @@ do { \
9770 } while (0)
9771
9772 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9773 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9774 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9775 "2:\n" \
9776 ".section .fixup,\"ax\"\n" \
9777 "3: mov %3,%0\n" \
9778 @@ -382,7 +424,7 @@ do { \
9779 " jmp 2b\n" \
9780 ".previous\n" \
9781 _ASM_EXTABLE(1b, 3b) \
9782 - : "=r" (err), ltype(x) \
9783 + : "=r" (err), ltype (x) \
9784 : "m" (__m(addr)), "i" (errret), "0" (err))
9785
9786 #define __get_user_size_ex(x, ptr, size) \
9787 @@ -407,7 +449,7 @@ do { \
9788 } while (0)
9789
9790 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9791 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9792 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9793 "2:\n" \
9794 _ASM_EXTABLE(1b, 2b - 1b) \
9795 : ltype(x) : "m" (__m(addr)))
9796 @@ -424,13 +466,24 @@ do { \
9797 int __gu_err; \
9798 unsigned long __gu_val; \
9799 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9800 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9801 + (x) = (__typeof__(*(ptr)))__gu_val; \
9802 __gu_err; \
9803 })
9804
9805 /* FIXME: this hack is definitely wrong -AK */
9806 struct __large_struct { unsigned long buf[100]; };
9807 -#define __m(x) (*(struct __large_struct __user *)(x))
9808 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9809 +#define ____m(x) \
9810 +({ \
9811 + unsigned long ____x = (unsigned long)(x); \
9812 + if (____x < PAX_USER_SHADOW_BASE) \
9813 + ____x += PAX_USER_SHADOW_BASE; \
9814 + (void __user *)____x; \
9815 +})
9816 +#else
9817 +#define ____m(x) (x)
9818 +#endif
9819 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9820
9821 /*
9822 * Tell gcc we read from memory instead of writing: this is because
9823 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
9824 * aliasing issues.
9825 */
9826 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9827 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9828 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9829 "2:\n" \
9830 ".section .fixup,\"ax\"\n" \
9831 "3: mov %3,%0\n" \
9832 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
9833 ".previous\n" \
9834 _ASM_EXTABLE(1b, 3b) \
9835 : "=r"(err) \
9836 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9837 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9838
9839 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9840 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9841 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9842 "2:\n" \
9843 _ASM_EXTABLE(1b, 2b - 1b) \
9844 : : ltype(x), "m" (__m(addr)))
9845 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
9846 * On error, the variable @x is set to zero.
9847 */
9848
9849 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9850 +#define __get_user(x, ptr) get_user((x), (ptr))
9851 +#else
9852 #define __get_user(x, ptr) \
9853 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9854 +#endif
9855
9856 /**
9857 * __put_user: - Write a simple value into user space, with less checking.
9858 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
9859 * Returns zero on success, or -EFAULT on error.
9860 */
9861
9862 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9863 +#define __put_user(x, ptr) put_user((x), (ptr))
9864 +#else
9865 #define __put_user(x, ptr) \
9866 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9867 +#endif
9868
9869 #define __get_user_unaligned __get_user
9870 #define __put_user_unaligned __put_user
9871 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
9872 #define get_user_ex(x, ptr) do { \
9873 unsigned long __gue_val; \
9874 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9875 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9876 + (x) = (__typeof__(*(ptr)))__gue_val; \
9877 } while (0)
9878
9879 #ifdef CONFIG_X86_WP_WORKS_OK
9880 @@ -567,6 +628,7 @@ extern struct movsl_mask {
9881
9882 #define ARCH_HAS_NOCACHE_UACCESS 1
9883
9884 +#define ARCH_HAS_SORT_EXTABLE
9885 #ifdef CONFIG_X86_32
9886 # include "uaccess_32.h"
9887 #else
9888 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vgtod.h linux-2.6.39.4/arch/x86/include/asm/vgtod.h
9889 --- linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400
9890 +++ linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-08-05 19:44:33.000000000 -0400
9891 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9892 int sysctl_enabled;
9893 struct timezone sys_tz;
9894 struct { /* extract of a clocksource struct */
9895 + char name[8];
9896 cycle_t (*vread)(void);
9897 cycle_t cycle_last;
9898 cycle_t mask;
9899 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vsyscall.h linux-2.6.39.4/arch/x86/include/asm/vsyscall.h
9900 --- linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400
9901 +++ linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-08-05 19:44:33.000000000 -0400
9902 @@ -15,9 +15,10 @@ enum vsyscall_num {
9903
9904 #ifdef __KERNEL__
9905 #include <linux/seqlock.h>
9906 +#include <linux/getcpu.h>
9907 +#include <linux/time.h>
9908
9909 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9910 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9911
9912 /* Definitions for CONFIG_GENERIC_TIME definitions */
9913 #define __section_vsyscall_gtod_data __attribute__ \
9914 @@ -31,7 +32,6 @@ enum vsyscall_num {
9915 #define VGETCPU_LSL 2
9916
9917 extern int __vgetcpu_mode;
9918 -extern volatile unsigned long __jiffies;
9919
9920 /* kernel space (writeable) */
9921 extern int vgetcpu_mode;
9922 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9923
9924 extern void map_vsyscall(void);
9925
9926 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9927 +extern time_t vtime(time_t *t);
9928 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9929 #endif /* __KERNEL__ */
9930
9931 #endif /* _ASM_X86_VSYSCALL_H */
9932 diff -urNp linux-2.6.39.4/arch/x86/include/asm/x86_init.h linux-2.6.39.4/arch/x86/include/asm/x86_init.h
9933 --- linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-05-19 00:06:34.000000000 -0400
9934 +++ linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-08-05 20:34:06.000000000 -0400
9935 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9936 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9937 void (*find_smp_config)(void);
9938 void (*get_smp_config)(unsigned int early);
9939 -};
9940 +} __no_const;
9941
9942 /**
9943 * struct x86_init_resources - platform specific resource related ops
9944 @@ -42,7 +42,7 @@ struct x86_init_resources {
9945 void (*probe_roms)(void);
9946 void (*reserve_resources)(void);
9947 char *(*memory_setup)(void);
9948 -};
9949 +} __no_const;
9950
9951 /**
9952 * struct x86_init_irqs - platform specific interrupt setup
9953 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9954 void (*pre_vector_init)(void);
9955 void (*intr_init)(void);
9956 void (*trap_init)(void);
9957 -};
9958 +} __no_const;
9959
9960 /**
9961 * struct x86_init_oem - oem platform specific customizing functions
9962 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9963 struct x86_init_oem {
9964 void (*arch_setup)(void);
9965 void (*banner)(void);
9966 -};
9967 +} __no_const;
9968
9969 /**
9970 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9971 @@ -76,7 +76,7 @@ struct x86_init_oem {
9972 */
9973 struct x86_init_mapping {
9974 void (*pagetable_reserve)(u64 start, u64 end);
9975 -};
9976 +} __no_const;
9977
9978 /**
9979 * struct x86_init_paging - platform specific paging functions
9980 @@ -86,7 +86,7 @@ struct x86_init_mapping {
9981 struct x86_init_paging {
9982 void (*pagetable_setup_start)(pgd_t *base);
9983 void (*pagetable_setup_done)(pgd_t *base);
9984 -};
9985 +} __no_const;
9986
9987 /**
9988 * struct x86_init_timers - platform specific timer setup
9989 @@ -101,7 +101,7 @@ struct x86_init_timers {
9990 void (*tsc_pre_init)(void);
9991 void (*timer_init)(void);
9992 void (*wallclock_init)(void);
9993 -};
9994 +} __no_const;
9995
9996 /**
9997 * struct x86_init_iommu - platform specific iommu setup
9998 @@ -109,7 +109,7 @@ struct x86_init_timers {
9999 */
10000 struct x86_init_iommu {
10001 int (*iommu_init)(void);
10002 -};
10003 +} __no_const;
10004
10005 /**
10006 * struct x86_init_pci - platform specific pci init functions
10007 @@ -123,7 +123,7 @@ struct x86_init_pci {
10008 int (*init)(void);
10009 void (*init_irq)(void);
10010 void (*fixup_irqs)(void);
10011 -};
10012 +} __no_const;
10013
10014 /**
10015 * struct x86_init_ops - functions for platform specific setup
10016 @@ -139,7 +139,7 @@ struct x86_init_ops {
10017 struct x86_init_timers timers;
10018 struct x86_init_iommu iommu;
10019 struct x86_init_pci pci;
10020 -};
10021 +} __no_const;
10022
10023 /**
10024 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10025 @@ -147,7 +147,7 @@ struct x86_init_ops {
10026 */
10027 struct x86_cpuinit_ops {
10028 void (*setup_percpu_clockev)(void);
10029 -};
10030 +} __no_const;
10031
10032 /**
10033 * struct x86_platform_ops - platform specific runtime functions
10034 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10035 bool (*is_untracked_pat_range)(u64 start, u64 end);
10036 void (*nmi_init)(void);
10037 int (*i8042_detect)(void);
10038 -};
10039 +} __no_const;
10040
10041 struct pci_dev;
10042
10043 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10044 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10045 void (*teardown_msi_irq)(unsigned int irq);
10046 void (*teardown_msi_irqs)(struct pci_dev *dev);
10047 -};
10048 +} __no_const;
10049
10050 extern struct x86_init_ops x86_init;
10051 extern struct x86_cpuinit_ops x86_cpuinit;
10052 diff -urNp linux-2.6.39.4/arch/x86/include/asm/xsave.h linux-2.6.39.4/arch/x86/include/asm/xsave.h
10053 --- linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400
10054 +++ linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-08-05 19:44:33.000000000 -0400
10055 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10056 {
10057 int err;
10058
10059 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10060 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10061 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10062 +#endif
10063 +
10064 /*
10065 * Clear the xsave header first, so that reserved fields are
10066 * initialized to zero.
10067 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10068 u32 lmask = mask;
10069 u32 hmask = mask >> 32;
10070
10071 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10072 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10073 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10074 +#endif
10075 +
10076 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10077 "2:\n"
10078 ".section .fixup,\"ax\"\n"
10079 diff -urNp linux-2.6.39.4/arch/x86/Kconfig linux-2.6.39.4/arch/x86/Kconfig
10080 --- linux-2.6.39.4/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400
10081 +++ linux-2.6.39.4/arch/x86/Kconfig 2011-08-05 19:44:33.000000000 -0400
10082 @@ -224,7 +224,7 @@ config X86_HT
10083
10084 config X86_32_LAZY_GS
10085 def_bool y
10086 - depends on X86_32 && !CC_STACKPROTECTOR
10087 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10088
10089 config ARCH_HWEIGHT_CFLAGS
10090 string
10091 @@ -1022,7 +1022,7 @@ choice
10092
10093 config NOHIGHMEM
10094 bool "off"
10095 - depends on !X86_NUMAQ
10096 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10097 ---help---
10098 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10099 However, the address space of 32-bit x86 processors is only 4
10100 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
10101
10102 config HIGHMEM4G
10103 bool "4GB"
10104 - depends on !X86_NUMAQ
10105 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10106 ---help---
10107 Select this if you have a 32-bit processor and between 1 and 4
10108 gigabytes of physical RAM.
10109 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
10110 hex
10111 default 0xB0000000 if VMSPLIT_3G_OPT
10112 default 0x80000000 if VMSPLIT_2G
10113 - default 0x78000000 if VMSPLIT_2G_OPT
10114 + default 0x70000000 if VMSPLIT_2G_OPT
10115 default 0x40000000 if VMSPLIT_1G
10116 default 0xC0000000
10117 depends on X86_32
10118 @@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
10119
10120 config EFI
10121 bool "EFI runtime service support"
10122 - depends on ACPI
10123 + depends on ACPI && !PAX_KERNEXEC
10124 ---help---
10125 This enables the kernel to use EFI runtime services that are
10126 available (such as the EFI variable services).
10127 @@ -1487,6 +1487,7 @@ config SECCOMP
10128
10129 config CC_STACKPROTECTOR
10130 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10131 + depends on X86_64 || !PAX_MEMORY_UDEREF
10132 ---help---
10133 This option turns on the -fstack-protector GCC feature. This
10134 feature puts, at the beginning of functions, a canary value on
10135 @@ -1544,6 +1545,7 @@ config KEXEC_JUMP
10136 config PHYSICAL_START
10137 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10138 default "0x1000000"
10139 + range 0x400000 0x40000000
10140 ---help---
10141 This gives the physical address where the kernel is loaded.
10142
10143 @@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS
10144 config PHYSICAL_ALIGN
10145 hex "Alignment value to which kernel should be aligned" if X86_32
10146 default "0x1000000"
10147 + range 0x400000 0x1000000 if PAX_KERNEXEC
10148 range 0x2000 0x1000000
10149 ---help---
10150 This value puts the alignment restrictions on physical address
10151 @@ -1638,9 +1641,10 @@ config HOTPLUG_CPU
10152 Say N if you want to disable CPU hotplug.
10153
10154 config COMPAT_VDSO
10155 - def_bool y
10156 + def_bool n
10157 prompt "Compat VDSO support"
10158 depends on X86_32 || IA32_EMULATION
10159 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10160 ---help---
10161 Map the 32-bit VDSO to the predictable old-style address too.
10162
10163 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.cpu linux-2.6.39.4/arch/x86/Kconfig.cpu
10164 --- linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400
10165 +++ linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-08-05 19:44:33.000000000 -0400
10166 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
10167
10168 config X86_F00F_BUG
10169 def_bool y
10170 - depends on M586MMX || M586TSC || M586 || M486 || M386
10171 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10172
10173 config X86_INVD_BUG
10174 def_bool y
10175 @@ -358,7 +358,7 @@ config X86_POPAD_OK
10176
10177 config X86_ALIGNMENT_16
10178 def_bool y
10179 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10180 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10181
10182 config X86_INTEL_USERCOPY
10183 def_bool y
10184 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
10185 # generates cmov.
10186 config X86_CMOV
10187 def_bool y
10188 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10189 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10190
10191 config X86_MINIMUM_CPU_FAMILY
10192 int
10193 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.debug linux-2.6.39.4/arch/x86/Kconfig.debug
10194 --- linux-2.6.39.4/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
10195 +++ linux-2.6.39.4/arch/x86/Kconfig.debug 2011-08-05 19:44:33.000000000 -0400
10196 @@ -101,7 +101,7 @@ config X86_PTDUMP
10197 config DEBUG_RODATA
10198 bool "Write protect kernel read-only data structures"
10199 default y
10200 - depends on DEBUG_KERNEL
10201 + depends on DEBUG_KERNEL && BROKEN
10202 ---help---
10203 Mark the kernel read-only data as write-protected in the pagetables,
10204 in order to catch accidental (and incorrect) writes to such const
10205 @@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST
10206
10207 config DEBUG_SET_MODULE_RONX
10208 bool "Set loadable kernel module data as NX and text as RO"
10209 - depends on MODULES
10210 + depends on MODULES && BROKEN
10211 ---help---
10212 This option helps catch unintended modifications to loadable
10213 kernel module's text and read-only data. It also prevents execution
10214 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile
10215 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-05-19 00:06:34.000000000 -0400
10216 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-05 20:34:06.000000000 -0400
10217 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10218 $(call cc-option, -fno-stack-protector) \
10219 $(call cc-option, -mpreferred-stack-boundary=2)
10220 KBUILD_CFLAGS += $(call cc-option, -m32)
10221 +ifdef CONSTIFY_PLUGIN
10222 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10223 +endif
10224 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10225 GCOV_PROFILE := n
10226
10227 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S
10228 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:18:51.000000000 -0400
10229 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-05 19:44:33.000000000 -0400
10230 @@ -108,6 +108,9 @@ wakeup_code:
10231 /* Do any other stuff... */
10232
10233 #ifndef CONFIG_64BIT
10234 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10235 + call verify_cpu
10236 +
10237 /* This could also be done in C code... */
10238 movl pmode_cr3, %eax
10239 movl %eax, %cr3
10240 @@ -131,6 +134,7 @@ wakeup_code:
10241 movl pmode_cr0, %eax
10242 movl %eax, %cr0
10243 jmp pmode_return
10244 +# include "../../verify_cpu.S"
10245 #else
10246 pushw $0
10247 pushw trampoline_segment
10248 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c
10249 --- linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:18:51.000000000 -0400
10250 +++ linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-08-05 19:44:33.000000000 -0400
10251 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10252 header->trampoline_segment = trampoline_address() >> 4;
10253 #ifdef CONFIG_SMP
10254 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10255 +
10256 + pax_open_kernel();
10257 early_gdt_descr.address =
10258 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10259 + pax_close_kernel();
10260 +
10261 initial_gs = per_cpu_offset(smp_processor_id());
10262 #endif
10263 initial_code = (unsigned long)wakeup_long64;
10264 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S
10265 --- linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400
10266 +++ linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-05 19:44:33.000000000 -0400
10267 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10268 # and restore the stack ... but you need gdt for this to work
10269 movl saved_context_esp, %esp
10270
10271 - movl %cs:saved_magic, %eax
10272 - cmpl $0x12345678, %eax
10273 + cmpl $0x12345678, saved_magic
10274 jne bogus_magic
10275
10276 # jump to place where we left off
10277 - movl saved_eip, %eax
10278 - jmp *%eax
10279 + jmp *(saved_eip)
10280
10281 bogus_magic:
10282 jmp bogus_magic
10283 diff -urNp linux-2.6.39.4/arch/x86/kernel/alternative.c linux-2.6.39.4/arch/x86/kernel/alternative.c
10284 --- linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400
10285 +++ linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-08-05 19:44:33.000000000 -0400
10286 @@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
10287 if (!*poff || ptr < text || ptr >= text_end)
10288 continue;
10289 /* turn DS segment override prefix into lock prefix */
10290 - if (*ptr == 0x3e)
10291 + if (*ktla_ktva(ptr) == 0x3e)
10292 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10293 };
10294 mutex_unlock(&text_mutex);
10295 @@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
10296 if (!*poff || ptr < text || ptr >= text_end)
10297 continue;
10298 /* turn lock prefix into DS segment override prefix */
10299 - if (*ptr == 0xf0)
10300 + if (*ktla_ktva(ptr) == 0xf0)
10301 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10302 };
10303 mutex_unlock(&text_mutex);
10304 @@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str
10305
10306 BUG_ON(p->len > MAX_PATCH_LEN);
10307 /* prep the buffer with the original instructions */
10308 - memcpy(insnbuf, p->instr, p->len);
10309 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10310 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10311 (unsigned long)p->instr, p->len);
10312
10313 @@ -506,7 +506,7 @@ void __init alternative_instructions(voi
10314 if (smp_alt_once)
10315 free_init_pages("SMP alternatives",
10316 (unsigned long)__smp_locks,
10317 - (unsigned long)__smp_locks_end);
10318 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10319
10320 restart_nmi();
10321 }
10322 @@ -523,13 +523,17 @@ void __init alternative_instructions(voi
10323 * instructions. And on the local CPU you need to be protected again NMI or MCE
10324 * handlers seeing an inconsistent instruction while you patch.
10325 */
10326 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10327 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10328 size_t len)
10329 {
10330 unsigned long flags;
10331 local_irq_save(flags);
10332 - memcpy(addr, opcode, len);
10333 +
10334 + pax_open_kernel();
10335 + memcpy(ktla_ktva(addr), opcode, len);
10336 sync_core();
10337 + pax_close_kernel();
10338 +
10339 local_irq_restore(flags);
10340 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10341 that causes hangs on some VIA CPUs. */
10342 @@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v
10343 */
10344 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10345 {
10346 - unsigned long flags;
10347 - char *vaddr;
10348 + unsigned char *vaddr = ktla_ktva(addr);
10349 struct page *pages[2];
10350 - int i;
10351 + size_t i;
10352
10353 if (!core_kernel_text((unsigned long)addr)) {
10354 - pages[0] = vmalloc_to_page(addr);
10355 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10356 + pages[0] = vmalloc_to_page(vaddr);
10357 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10358 } else {
10359 - pages[0] = virt_to_page(addr);
10360 + pages[0] = virt_to_page(vaddr);
10361 WARN_ON(!PageReserved(pages[0]));
10362 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10363 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10364 }
10365 BUG_ON(!pages[0]);
10366 - local_irq_save(flags);
10367 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10368 - if (pages[1])
10369 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10370 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10371 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10372 - clear_fixmap(FIX_TEXT_POKE0);
10373 - if (pages[1])
10374 - clear_fixmap(FIX_TEXT_POKE1);
10375 - local_flush_tlb();
10376 - sync_core();
10377 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10378 - that causes hangs on some VIA CPUs. */
10379 + text_poke_early(addr, opcode, len);
10380 for (i = 0; i < len; i++)
10381 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10382 - local_irq_restore(flags);
10383 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10384 return addr;
10385 }
10386
10387 @@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc
10388 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
10389
10390 #ifdef CONFIG_X86_64
10391 -unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10392 +unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10393 #else
10394 -unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10395 +unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10396 #endif
10397
10398 void __init arch_init_ideal_nop5(void)
10399 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/apic.c linux-2.6.39.4/arch/x86/kernel/apic/apic.c
10400 --- linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400
10401 +++ linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-08-05 19:44:33.000000000 -0400
10402 @@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs
10403 apic_write(APIC_ESR, 0);
10404 v1 = apic_read(APIC_ESR);
10405 ack_APIC_irq();
10406 - atomic_inc(&irq_err_count);
10407 + atomic_inc_unchecked(&irq_err_count);
10408
10409 /*
10410 * Here is what the APIC error bits mean:
10411 @@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo
10412 u16 *bios_cpu_apicid;
10413 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10414
10415 + pax_track_stack();
10416 +
10417 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10418 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10419
10420 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c
10421 --- linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400
10422 +++ linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-08-05 19:44:33.000000000 -0400
10423 @@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi
10424 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
10425 GFP_ATOMIC);
10426 if (!ioapic_entries)
10427 - return 0;
10428 + return NULL;
10429
10430 for (apic = 0; apic < nr_ioapics; apic++) {
10431 ioapic_entries[apic] =
10432 @@ -640,7 +640,7 @@ nomem:
10433 kfree(ioapic_entries[apic]);
10434 kfree(ioapic_entries);
10435
10436 - return 0;
10437 + return NULL;
10438 }
10439
10440 /*
10441 @@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10442 }
10443 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10444
10445 -void lock_vector_lock(void)
10446 +void lock_vector_lock(void) __acquires(vector_lock)
10447 {
10448 /* Used to the online set of cpus does not change
10449 * during assign_irq_vector.
10450 @@ -1048,7 +1048,7 @@ void lock_vector_lock(void)
10451 raw_spin_lock(&vector_lock);
10452 }
10453
10454 -void unlock_vector_lock(void)
10455 +void unlock_vector_lock(void) __releases(vector_lock)
10456 {
10457 raw_spin_unlock(&vector_lock);
10458 }
10459 @@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat
10460 ack_APIC_irq();
10461 }
10462
10463 -atomic_t irq_mis_count;
10464 +atomic_unchecked_t irq_mis_count;
10465
10466 /*
10467 * IO-APIC versions below 0x20 don't support EOI register.
10468 @@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da
10469 * at the cpu.
10470 */
10471 if (!(v & (1 << (i & 0x1f)))) {
10472 - atomic_inc(&irq_mis_count);
10473 + atomic_inc_unchecked(&irq_mis_count);
10474
10475 eoi_ioapic_irq(irq, cfg);
10476 }
10477 diff -urNp linux-2.6.39.4/arch/x86/kernel/apm_32.c linux-2.6.39.4/arch/x86/kernel/apm_32.c
10478 --- linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400
10479 +++ linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-08-05 19:44:33.000000000 -0400
10480 @@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
10481 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10482 * even though they are called in protected mode.
10483 */
10484 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10485 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10486 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10487
10488 static const char driver_version[] = "1.16ac"; /* no spaces */
10489 @@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
10490 BUG_ON(cpu != 0);
10491 gdt = get_cpu_gdt_table(cpu);
10492 save_desc_40 = gdt[0x40 / 8];
10493 +
10494 + pax_open_kernel();
10495 gdt[0x40 / 8] = bad_bios_desc;
10496 + pax_close_kernel();
10497
10498 apm_irq_save(flags);
10499 APM_DO_SAVE_SEGS;
10500 @@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
10501 &call->esi);
10502 APM_DO_RESTORE_SEGS;
10503 apm_irq_restore(flags);
10504 +
10505 + pax_open_kernel();
10506 gdt[0x40 / 8] = save_desc_40;
10507 + pax_close_kernel();
10508 +
10509 put_cpu();
10510
10511 return call->eax & 0xff;
10512 @@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void
10513 BUG_ON(cpu != 0);
10514 gdt = get_cpu_gdt_table(cpu);
10515 save_desc_40 = gdt[0x40 / 8];
10516 +
10517 + pax_open_kernel();
10518 gdt[0x40 / 8] = bad_bios_desc;
10519 + pax_close_kernel();
10520
10521 apm_irq_save(flags);
10522 APM_DO_SAVE_SEGS;
10523 @@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void
10524 &call->eax);
10525 APM_DO_RESTORE_SEGS;
10526 apm_irq_restore(flags);
10527 +
10528 + pax_open_kernel();
10529 gdt[0x40 / 8] = save_desc_40;
10530 + pax_close_kernel();
10531 +
10532 put_cpu();
10533 return error;
10534 }
10535 @@ -2351,12 +2365,15 @@ static int __init apm_init(void)
10536 * code to that CPU.
10537 */
10538 gdt = get_cpu_gdt_table(0);
10539 +
10540 + pax_open_kernel();
10541 set_desc_base(&gdt[APM_CS >> 3],
10542 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10543 set_desc_base(&gdt[APM_CS_16 >> 3],
10544 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10545 set_desc_base(&gdt[APM_DS >> 3],
10546 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10547 + pax_close_kernel();
10548
10549 proc_create("apm", 0, NULL, &apm_file_ops);
10550
10551 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c
10552 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400
10553 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-08-05 19:44:33.000000000 -0400
10554 @@ -69,6 +69,7 @@ int main(void)
10555 BLANK();
10556 #undef ENTRY
10557
10558 + DEFINE(TSS_size, sizeof(struct tss_struct));
10559 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10560 BLANK();
10561
10562 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets.c linux-2.6.39.4/arch/x86/kernel/asm-offsets.c
10563 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400
10564 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-08-05 19:44:33.000000000 -0400
10565 @@ -33,6 +33,8 @@ void common(void) {
10566 OFFSET(TI_status, thread_info, status);
10567 OFFSET(TI_addr_limit, thread_info, addr_limit);
10568 OFFSET(TI_preempt_count, thread_info, preempt_count);
10569 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10570 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10571
10572 BLANK();
10573 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10574 @@ -53,8 +55,26 @@ void common(void) {
10575 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10576 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10577 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10578 +
10579 +#ifdef CONFIG_PAX_KERNEXEC
10580 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10581 +#endif
10582 +
10583 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10584 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10585 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10586 +#ifdef CONFIG_X86_64
10587 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
10588 +#endif
10589 #endif
10590
10591 +#endif
10592 +
10593 + BLANK();
10594 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10595 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10596 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10597 +
10598 #ifdef CONFIG_XEN
10599 BLANK();
10600 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10601 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/amd.c linux-2.6.39.4/arch/x86/kernel/cpu/amd.c
10602 --- linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400
10603 +++ linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-08-05 19:44:33.000000000 -0400
10604 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10605 unsigned int size)
10606 {
10607 /* AMD errata T13 (order #21922) */
10608 - if ((c->x86 == 6)) {
10609 + if (c->x86 == 6) {
10610 /* Duron Rev A0 */
10611 if (c->x86_model == 3 && c->x86_mask == 0)
10612 size = 64;
10613 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/common.c linux-2.6.39.4/arch/x86/kernel/cpu/common.c
10614 --- linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400
10615 +++ linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-08-05 19:44:33.000000000 -0400
10616 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10617
10618 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10619
10620 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10621 -#ifdef CONFIG_X86_64
10622 - /*
10623 - * We need valid kernel segments for data and code in long mode too
10624 - * IRET will check the segment types kkeil 2000/10/28
10625 - * Also sysret mandates a special GDT layout
10626 - *
10627 - * TLS descriptors are currently at a different place compared to i386.
10628 - * Hopefully nobody expects them at a fixed place (Wine?)
10629 - */
10630 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10631 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10632 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10633 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10634 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10635 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10636 -#else
10637 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10638 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10639 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10640 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10641 - /*
10642 - * Segments used for calling PnP BIOS have byte granularity.
10643 - * They code segments and data segments have fixed 64k limits,
10644 - * the transfer segment sizes are set at run time.
10645 - */
10646 - /* 32-bit code */
10647 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10648 - /* 16-bit code */
10649 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10650 - /* 16-bit data */
10651 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10652 - /* 16-bit data */
10653 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10654 - /* 16-bit data */
10655 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10656 - /*
10657 - * The APM segments have byte granularity and their bases
10658 - * are set at run time. All have 64k limits.
10659 - */
10660 - /* 32-bit code */
10661 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10662 - /* 16-bit code */
10663 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10664 - /* data */
10665 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10666 -
10667 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10668 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10669 - GDT_STACK_CANARY_INIT
10670 -#endif
10671 -} };
10672 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10673 -
10674 static int __init x86_xsave_setup(char *s)
10675 {
10676 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10677 @@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
10678 {
10679 struct desc_ptr gdt_descr;
10680
10681 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10682 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10683 gdt_descr.size = GDT_SIZE - 1;
10684 load_gdt(&gdt_descr);
10685 /* Reload the per-cpu base */
10686 @@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc
10687 /* Filter out anything that depends on CPUID levels we don't have */
10688 filter_cpuid_features(c, true);
10689
10690 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10691 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10692 +#endif
10693 +
10694 /* If the model name is still unset, do table lookup. */
10695 if (!c->x86_model_id[0]) {
10696 const char *p;
10697 @@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha
10698 }
10699 __setup("clearcpuid=", setup_disablecpuid);
10700
10701 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10702 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10703 +
10704 #ifdef CONFIG_X86_64
10705 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10706
10707 @@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10708 EXPORT_PER_CPU_SYMBOL(current_task);
10709
10710 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10711 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10712 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10713 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10714
10715 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10716 @@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str
10717 {
10718 memset(regs, 0, sizeof(struct pt_regs));
10719 regs->fs = __KERNEL_PERCPU;
10720 - regs->gs = __KERNEL_STACK_CANARY;
10721 + savesegment(gs, regs->gs);
10722
10723 return regs;
10724 }
10725 @@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void)
10726 int i;
10727
10728 cpu = stack_smp_processor_id();
10729 - t = &per_cpu(init_tss, cpu);
10730 + t = init_tss + cpu;
10731 oist = &per_cpu(orig_ist, cpu);
10732
10733 #ifdef CONFIG_NUMA
10734 @@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void)
10735 switch_to_new_gdt(cpu);
10736 loadsegment(fs, 0);
10737
10738 - load_idt((const struct desc_ptr *)&idt_descr);
10739 + load_idt(&idt_descr);
10740
10741 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10742 syscall_init();
10743 @@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void)
10744 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10745 barrier();
10746
10747 - x86_configure_nx();
10748 if (cpu != 0)
10749 enable_x2apic();
10750
10751 @@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
10752 {
10753 int cpu = smp_processor_id();
10754 struct task_struct *curr = current;
10755 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10756 + struct tss_struct *t = init_tss + cpu;
10757 struct thread_struct *thread = &curr->thread;
10758
10759 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10760 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/intel.c linux-2.6.39.4/arch/x86/kernel/cpu/intel.c
10761 --- linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400
10762 +++ linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-08-05 19:44:33.000000000 -0400
10763 @@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
10764 * Update the IDT descriptor and reload the IDT so that
10765 * it uses the read-only mapped virtual address.
10766 */
10767 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10768 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10769 load_idt(&idt_descr);
10770 }
10771 #endif
10772 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/Makefile linux-2.6.39.4/arch/x86/kernel/cpu/Makefile
10773 --- linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400
10774 +++ linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-08-05 19:44:33.000000000 -0400
10775 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10776 CFLAGS_REMOVE_perf_event.o = -pg
10777 endif
10778
10779 -# Make sure load_percpu_segment has no stackprotector
10780 -nostackp := $(call cc-option, -fno-stack-protector)
10781 -CFLAGS_common.o := $(nostackp)
10782 -
10783 obj-y := intel_cacheinfo.o scattered.o topology.o
10784 obj-y += proc.o capflags.o powerflags.o common.o
10785 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10786 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c
10787 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400
10788 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-05 19:44:33.000000000 -0400
10789 @@ -46,6 +46,7 @@
10790 #include <asm/ipi.h>
10791 #include <asm/mce.h>
10792 #include <asm/msr.h>
10793 +#include <asm/local.h>
10794
10795 #include "mce-internal.h"
10796
10797 @@ -220,7 +221,7 @@ static void print_mce(struct mce *m)
10798 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10799 m->cs, m->ip);
10800
10801 - if (m->cs == __KERNEL_CS)
10802 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10803 print_symbol("{%s}", m->ip);
10804 pr_cont("\n");
10805 }
10806 @@ -244,10 +245,10 @@ static void print_mce(struct mce *m)
10807
10808 #define PANIC_TIMEOUT 5 /* 5 seconds */
10809
10810 -static atomic_t mce_paniced;
10811 +static atomic_unchecked_t mce_paniced;
10812
10813 static int fake_panic;
10814 -static atomic_t mce_fake_paniced;
10815 +static atomic_unchecked_t mce_fake_paniced;
10816
10817 /* Panic in progress. Enable interrupts and wait for final IPI */
10818 static void wait_for_panic(void)
10819 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10820 /*
10821 * Make sure only one CPU runs in machine check panic
10822 */
10823 - if (atomic_inc_return(&mce_paniced) > 1)
10824 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10825 wait_for_panic();
10826 barrier();
10827
10828 @@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct
10829 console_verbose();
10830 } else {
10831 /* Don't log too much for fake panic */
10832 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10833 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10834 return;
10835 }
10836 /* First print corrected ones that are still unlogged */
10837 @@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t)
10838 * might have been modified by someone else.
10839 */
10840 rmb();
10841 - if (atomic_read(&mce_paniced))
10842 + if (atomic_read_unchecked(&mce_paniced))
10843 wait_for_panic();
10844 if (!monarch_timeout)
10845 goto out;
10846 @@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10847 */
10848
10849 static DEFINE_SPINLOCK(mce_state_lock);
10850 -static int open_count; /* #times opened */
10851 +static local_t open_count; /* #times opened */
10852 static int open_exclu; /* already open exclusive? */
10853
10854 static int mce_open(struct inode *inode, struct file *file)
10855 {
10856 spin_lock(&mce_state_lock);
10857
10858 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10859 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10860 spin_unlock(&mce_state_lock);
10861
10862 return -EBUSY;
10863 @@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode,
10864
10865 if (file->f_flags & O_EXCL)
10866 open_exclu = 1;
10867 - open_count++;
10868 + local_inc(&open_count);
10869
10870 spin_unlock(&mce_state_lock);
10871
10872 @@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino
10873 {
10874 spin_lock(&mce_state_lock);
10875
10876 - open_count--;
10877 + local_dec(&open_count);
10878 open_exclu = 0;
10879
10880 spin_unlock(&mce_state_lock);
10881 @@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void)
10882 static void mce_reset(void)
10883 {
10884 cpu_missing = 0;
10885 - atomic_set(&mce_fake_paniced, 0);
10886 + atomic_set_unchecked(&mce_fake_paniced, 0);
10887 atomic_set(&mce_executing, 0);
10888 atomic_set(&mce_callin, 0);
10889 atomic_set(&global_nwo, 0);
10890 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10891 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-05-19 00:06:34.000000000 -0400
10892 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:34:06.000000000 -0400
10893 @@ -215,7 +215,9 @@ static int inject_init(void)
10894 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10895 return -ENOMEM;
10896 printk(KERN_INFO "Machine check injector initialized\n");
10897 - mce_chrdev_ops.write = mce_write;
10898 + pax_open_kernel();
10899 + *(void **)&mce_chrdev_ops.write = mce_write;
10900 + pax_close_kernel();
10901 register_die_notifier(&mce_raise_nb);
10902 return 0;
10903 }
10904 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c
10905 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400
10906 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-05 19:44:33.000000000 -0400
10907 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10908 u64 size_or_mask, size_and_mask;
10909 static bool mtrr_aps_delayed_init;
10910
10911 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10912 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10913
10914 const struct mtrr_ops *mtrr_if;
10915
10916 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10917 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400
10918 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-05 20:34:06.000000000 -0400
10919 @@ -12,8 +12,8 @@
10920 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10921
10922 struct mtrr_ops {
10923 - u32 vendor;
10924 - u32 use_intel_if;
10925 + const u32 vendor;
10926 + const u32 use_intel_if;
10927 void (*set)(unsigned int reg, unsigned long base,
10928 unsigned long size, mtrr_type type);
10929 void (*set_all)(void);
10930 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c
10931 --- linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400
10932 +++ linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-08-05 19:44:33.000000000 -0400
10933 @@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp
10934 int i, j, w, wmax, num = 0;
10935 struct hw_perf_event *hwc;
10936
10937 + pax_track_stack();
10938 +
10939 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10940
10941 for (i = 0; i < n; i++) {
10942 @@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai
10943 break;
10944
10945 perf_callchain_store(entry, frame.return_address);
10946 - fp = frame.next_frame;
10947 + fp = (__force const void __user *)frame.next_frame;
10948 }
10949 }
10950
10951 diff -urNp linux-2.6.39.4/arch/x86/kernel/crash.c linux-2.6.39.4/arch/x86/kernel/crash.c
10952 --- linux-2.6.39.4/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400
10953 +++ linux-2.6.39.4/arch/x86/kernel/crash.c 2011-08-05 19:44:33.000000000 -0400
10954 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10955 regs = args->regs;
10956
10957 #ifdef CONFIG_X86_32
10958 - if (!user_mode_vm(regs)) {
10959 + if (!user_mode(regs)) {
10960 crash_fixup_ss_esp(&fixed_regs, regs);
10961 regs = &fixed_regs;
10962 }
10963 diff -urNp linux-2.6.39.4/arch/x86/kernel/doublefault_32.c linux-2.6.39.4/arch/x86/kernel/doublefault_32.c
10964 --- linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400
10965 +++ linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-08-05 19:44:33.000000000 -0400
10966 @@ -11,7 +11,7 @@
10967
10968 #define DOUBLEFAULT_STACKSIZE (1024)
10969 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10970 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10971 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10972
10973 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10974
10975 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10976 unsigned long gdt, tss;
10977
10978 store_gdt(&gdt_desc);
10979 - gdt = gdt_desc.address;
10980 + gdt = (unsigned long)gdt_desc.address;
10981
10982 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10983
10984 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10985 /* 0x2 bit is always set */
10986 .flags = X86_EFLAGS_SF | 0x2,
10987 .sp = STACK_START,
10988 - .es = __USER_DS,
10989 + .es = __KERNEL_DS,
10990 .cs = __KERNEL_CS,
10991 .ss = __KERNEL_DS,
10992 - .ds = __USER_DS,
10993 + .ds = __KERNEL_DS,
10994 .fs = __KERNEL_PERCPU,
10995
10996 .__cr3 = __pa_nodebug(swapper_pg_dir),
10997 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c
10998 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400
10999 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-08-05 19:44:33.000000000 -0400
11000 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11001 bp = stack_frame(task, regs);
11002
11003 for (;;) {
11004 - struct thread_info *context;
11005 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11006
11007 - context = (struct thread_info *)
11008 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11009 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11010 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11011
11012 - stack = (unsigned long *)context->previous_esp;
11013 - if (!stack)
11014 + if (stack_start == task_stack_page(task))
11015 break;
11016 + stack = *(unsigned long **)stack_start;
11017 if (ops->stack(data, "IRQ") < 0)
11018 break;
11019 touch_nmi_watchdog();
11020 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11021 * When in-kernel, we also print out the stack and code at the
11022 * time of the fault..
11023 */
11024 - if (!user_mode_vm(regs)) {
11025 + if (!user_mode(regs)) {
11026 unsigned int code_prologue = code_bytes * 43 / 64;
11027 unsigned int code_len = code_bytes;
11028 unsigned char c;
11029 u8 *ip;
11030 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11031
11032 printk(KERN_EMERG "Stack:\n");
11033 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11034
11035 printk(KERN_EMERG "Code: ");
11036
11037 - ip = (u8 *)regs->ip - code_prologue;
11038 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11039 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11040 /* try starting at IP */
11041 - ip = (u8 *)regs->ip;
11042 + ip = (u8 *)regs->ip + cs_base;
11043 code_len = code_len - code_prologue + 1;
11044 }
11045 for (i = 0; i < code_len; i++, ip++) {
11046 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11047 printk(" Bad EIP value.");
11048 break;
11049 }
11050 - if (ip == (u8 *)regs->ip)
11051 + if (ip == (u8 *)regs->ip + cs_base)
11052 printk("<%02x> ", c);
11053 else
11054 printk("%02x ", c);
11055 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11056 {
11057 unsigned short ud2;
11058
11059 + ip = ktla_ktva(ip);
11060 if (ip < PAGE_OFFSET)
11061 return 0;
11062 if (probe_kernel_address((unsigned short *)ip, ud2))
11063 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c
11064 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400
11065 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-08-05 19:44:33.000000000 -0400
11066 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11067 unsigned long *irq_stack_end =
11068 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11069 unsigned used = 0;
11070 - struct thread_info *tinfo;
11071 int graph = 0;
11072 unsigned long dummy;
11073 + void *stack_start;
11074
11075 if (!task)
11076 task = current;
11077 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11078 * current stack address. If the stacks consist of nested
11079 * exceptions
11080 */
11081 - tinfo = task_thread_info(task);
11082 for (;;) {
11083 char *id;
11084 unsigned long *estack_end;
11085 +
11086 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11087 &used, &id);
11088
11089 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11090 if (ops->stack(data, id) < 0)
11091 break;
11092
11093 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11094 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11095 data, estack_end, &graph);
11096 ops->stack(data, "<EOE>");
11097 /*
11098 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11099 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11100 if (ops->stack(data, "IRQ") < 0)
11101 break;
11102 - bp = ops->walk_stack(tinfo, stack, bp,
11103 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11104 ops, data, irq_stack_end, &graph);
11105 /*
11106 * We link to the next stack (which would be
11107 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11108 /*
11109 * This handles the process stack:
11110 */
11111 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11112 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11113 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11114 put_cpu();
11115 }
11116 EXPORT_SYMBOL(dump_trace);
11117 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack.c linux-2.6.39.4/arch/x86/kernel/dumpstack.c
11118 --- linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400
11119 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-08-05 19:44:33.000000000 -0400
11120 @@ -2,6 +2,9 @@
11121 * Copyright (C) 1991, 1992 Linus Torvalds
11122 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11123 */
11124 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11125 +#define __INCLUDED_BY_HIDESYM 1
11126 +#endif
11127 #include <linux/kallsyms.h>
11128 #include <linux/kprobes.h>
11129 #include <linux/uaccess.h>
11130 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11131 static void
11132 print_ftrace_graph_addr(unsigned long addr, void *data,
11133 const struct stacktrace_ops *ops,
11134 - struct thread_info *tinfo, int *graph)
11135 + struct task_struct *task, int *graph)
11136 {
11137 - struct task_struct *task = tinfo->task;
11138 unsigned long ret_addr;
11139 int index = task->curr_ret_stack;
11140
11141 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11142 static inline void
11143 print_ftrace_graph_addr(unsigned long addr, void *data,
11144 const struct stacktrace_ops *ops,
11145 - struct thread_info *tinfo, int *graph)
11146 + struct task_struct *task, int *graph)
11147 { }
11148 #endif
11149
11150 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11151 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11152 */
11153
11154 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11155 - void *p, unsigned int size, void *end)
11156 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11157 {
11158 - void *t = tinfo;
11159 if (end) {
11160 if (p < end && p >= (end-THREAD_SIZE))
11161 return 1;
11162 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11163 }
11164
11165 unsigned long
11166 -print_context_stack(struct thread_info *tinfo,
11167 +print_context_stack(struct task_struct *task, void *stack_start,
11168 unsigned long *stack, unsigned long bp,
11169 const struct stacktrace_ops *ops, void *data,
11170 unsigned long *end, int *graph)
11171 {
11172 struct stack_frame *frame = (struct stack_frame *)bp;
11173
11174 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11175 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11176 unsigned long addr;
11177
11178 addr = *stack;
11179 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11180 } else {
11181 ops->address(data, addr, 0);
11182 }
11183 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11184 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11185 }
11186 stack++;
11187 }
11188 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11189 EXPORT_SYMBOL_GPL(print_context_stack);
11190
11191 unsigned long
11192 -print_context_stack_bp(struct thread_info *tinfo,
11193 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11194 unsigned long *stack, unsigned long bp,
11195 const struct stacktrace_ops *ops, void *data,
11196 unsigned long *end, int *graph)
11197 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11198 struct stack_frame *frame = (struct stack_frame *)bp;
11199 unsigned long *ret_addr = &frame->return_address;
11200
11201 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11202 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11203 unsigned long addr = *ret_addr;
11204
11205 if (!__kernel_text_address(addr))
11206 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11207 ops->address(data, addr, 1);
11208 frame = frame->next_frame;
11209 ret_addr = &frame->return_address;
11210 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11211 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11212 }
11213
11214 return (unsigned long)frame;
11215 @@ -202,7 +202,7 @@ void dump_stack(void)
11216
11217 bp = stack_frame(current, NULL);
11218 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11219 - current->pid, current->comm, print_tainted(),
11220 + task_pid_nr(current), current->comm, print_tainted(),
11221 init_utsname()->release,
11222 (int)strcspn(init_utsname()->version, " "),
11223 init_utsname()->version);
11224 @@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void)
11225 }
11226 EXPORT_SYMBOL_GPL(oops_begin);
11227
11228 +extern void gr_handle_kernel_exploit(void);
11229 +
11230 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11231 {
11232 if (regs && kexec_should_crash(current))
11233 @@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl
11234 panic("Fatal exception in interrupt");
11235 if (panic_on_oops)
11236 panic("Fatal exception");
11237 - do_exit(signr);
11238 +
11239 + gr_handle_kernel_exploit();
11240 +
11241 + do_group_exit(signr);
11242 }
11243
11244 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11245 @@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str
11246
11247 show_registers(regs);
11248 #ifdef CONFIG_X86_32
11249 - if (user_mode_vm(regs)) {
11250 + if (user_mode(regs)) {
11251 sp = regs->sp;
11252 ss = regs->ss & 0xffff;
11253 } else {
11254 @@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs
11255 unsigned long flags = oops_begin();
11256 int sig = SIGSEGV;
11257
11258 - if (!user_mode_vm(regs))
11259 + if (!user_mode(regs))
11260 report_bug(regs->ip, regs);
11261
11262 if (__die(str, regs, err))
11263 diff -urNp linux-2.6.39.4/arch/x86/kernel/early_printk.c linux-2.6.39.4/arch/x86/kernel/early_printk.c
11264 --- linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400
11265 +++ linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-08-05 19:44:33.000000000 -0400
11266 @@ -7,6 +7,7 @@
11267 #include <linux/pci_regs.h>
11268 #include <linux/pci_ids.h>
11269 #include <linux/errno.h>
11270 +#include <linux/sched.h>
11271 #include <asm/io.h>
11272 #include <asm/processor.h>
11273 #include <asm/fcntl.h>
11274 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11275 int n;
11276 va_list ap;
11277
11278 + pax_track_stack();
11279 +
11280 va_start(ap, fmt);
11281 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11282 early_console->write(early_console, buf, n);
11283 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_32.S linux-2.6.39.4/arch/x86/kernel/entry_32.S
11284 --- linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400
11285 +++ linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-08-05 19:44:33.000000000 -0400
11286 @@ -185,13 +185,146 @@
11287 /*CFI_REL_OFFSET gs, PT_GS*/
11288 .endm
11289 .macro SET_KERNEL_GS reg
11290 +
11291 +#ifdef CONFIG_CC_STACKPROTECTOR
11292 movl $(__KERNEL_STACK_CANARY), \reg
11293 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11294 + movl $(__USER_DS), \reg
11295 +#else
11296 + xorl \reg, \reg
11297 +#endif
11298 +
11299 movl \reg, %gs
11300 .endm
11301
11302 #endif /* CONFIG_X86_32_LAZY_GS */
11303
11304 -.macro SAVE_ALL
11305 +.macro pax_enter_kernel
11306 +#ifdef CONFIG_PAX_KERNEXEC
11307 + call pax_enter_kernel
11308 +#endif
11309 +.endm
11310 +
11311 +.macro pax_exit_kernel
11312 +#ifdef CONFIG_PAX_KERNEXEC
11313 + call pax_exit_kernel
11314 +#endif
11315 +.endm
11316 +
11317 +#ifdef CONFIG_PAX_KERNEXEC
11318 +ENTRY(pax_enter_kernel)
11319 +#ifdef CONFIG_PARAVIRT
11320 + pushl %eax
11321 + pushl %ecx
11322 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11323 + mov %eax, %esi
11324 +#else
11325 + mov %cr0, %esi
11326 +#endif
11327 + bts $16, %esi
11328 + jnc 1f
11329 + mov %cs, %esi
11330 + cmp $__KERNEL_CS, %esi
11331 + jz 3f
11332 + ljmp $__KERNEL_CS, $3f
11333 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11334 +2:
11335 +#ifdef CONFIG_PARAVIRT
11336 + mov %esi, %eax
11337 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11338 +#else
11339 + mov %esi, %cr0
11340 +#endif
11341 +3:
11342 +#ifdef CONFIG_PARAVIRT
11343 + popl %ecx
11344 + popl %eax
11345 +#endif
11346 + ret
11347 +ENDPROC(pax_enter_kernel)
11348 +
11349 +ENTRY(pax_exit_kernel)
11350 +#ifdef CONFIG_PARAVIRT
11351 + pushl %eax
11352 + pushl %ecx
11353 +#endif
11354 + mov %cs, %esi
11355 + cmp $__KERNEXEC_KERNEL_CS, %esi
11356 + jnz 2f
11357 +#ifdef CONFIG_PARAVIRT
11358 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11359 + mov %eax, %esi
11360 +#else
11361 + mov %cr0, %esi
11362 +#endif
11363 + btr $16, %esi
11364 + ljmp $__KERNEL_CS, $1f
11365 +1:
11366 +#ifdef CONFIG_PARAVIRT
11367 + mov %esi, %eax
11368 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11369 +#else
11370 + mov %esi, %cr0
11371 +#endif
11372 +2:
11373 +#ifdef CONFIG_PARAVIRT
11374 + popl %ecx
11375 + popl %eax
11376 +#endif
11377 + ret
11378 +ENDPROC(pax_exit_kernel)
11379 +#endif
11380 +
11381 +.macro pax_erase_kstack
11382 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11383 + call pax_erase_kstack
11384 +#endif
11385 +.endm
11386 +
11387 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11388 +/*
11389 + * ebp: thread_info
11390 + * ecx, edx: can be clobbered
11391 + */
11392 +ENTRY(pax_erase_kstack)
11393 + pushl %edi
11394 + pushl %eax
11395 +
11396 + mov TI_lowest_stack(%ebp), %edi
11397 + mov $-0xBEEF, %eax
11398 + std
11399 +
11400 +1: mov %edi, %ecx
11401 + and $THREAD_SIZE_asm - 1, %ecx
11402 + shr $2, %ecx
11403 + repne scasl
11404 + jecxz 2f
11405 +
11406 + cmp $2*16, %ecx
11407 + jc 2f
11408 +
11409 + mov $2*16, %ecx
11410 + repe scasl
11411 + jecxz 2f
11412 + jne 1b
11413 +
11414 +2: cld
11415 + mov %esp, %ecx
11416 + sub %edi, %ecx
11417 + shr $2, %ecx
11418 + rep stosl
11419 +
11420 + mov TI_task_thread_sp0(%ebp), %edi
11421 + sub $128, %edi
11422 + mov %edi, TI_lowest_stack(%ebp)
11423 +
11424 + popl %eax
11425 + popl %edi
11426 + ret
11427 +ENDPROC(pax_erase_kstack)
11428 +#endif
11429 +
11430 +.macro __SAVE_ALL _DS
11431 cld
11432 PUSH_GS
11433 pushl_cfi %fs
11434 @@ -214,7 +347,7 @@
11435 CFI_REL_OFFSET ecx, 0
11436 pushl_cfi %ebx
11437 CFI_REL_OFFSET ebx, 0
11438 - movl $(__USER_DS), %edx
11439 + movl $\_DS, %edx
11440 movl %edx, %ds
11441 movl %edx, %es
11442 movl $(__KERNEL_PERCPU), %edx
11443 @@ -222,6 +355,15 @@
11444 SET_KERNEL_GS %edx
11445 .endm
11446
11447 +.macro SAVE_ALL
11448 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11449 + __SAVE_ALL __KERNEL_DS
11450 + pax_enter_kernel
11451 +#else
11452 + __SAVE_ALL __USER_DS
11453 +#endif
11454 +.endm
11455 +
11456 .macro RESTORE_INT_REGS
11457 popl_cfi %ebx
11458 CFI_RESTORE ebx
11459 @@ -332,7 +474,15 @@ check_userspace:
11460 movb PT_CS(%esp), %al
11461 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11462 cmpl $USER_RPL, %eax
11463 +
11464 +#ifdef CONFIG_PAX_KERNEXEC
11465 + jae resume_userspace
11466 +
11467 + PAX_EXIT_KERNEL
11468 + jmp resume_kernel
11469 +#else
11470 jb resume_kernel # not returning to v8086 or userspace
11471 +#endif
11472
11473 ENTRY(resume_userspace)
11474 LOCKDEP_SYS_EXIT
11475 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11476 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11477 # int/exception return?
11478 jne work_pending
11479 - jmp restore_all
11480 + jmp restore_all_pax
11481 END(ret_from_exception)
11482
11483 #ifdef CONFIG_PREEMPT
11484 @@ -394,23 +544,34 @@ sysenter_past_esp:
11485 /*CFI_REL_OFFSET cs, 0*/
11486 /*
11487 * Push current_thread_info()->sysenter_return to the stack.
11488 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11489 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11490 */
11491 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11492 + pushl_cfi $0
11493 CFI_REL_OFFSET eip, 0
11494
11495 pushl_cfi %eax
11496 SAVE_ALL
11497 + GET_THREAD_INFO(%ebp)
11498 + movl TI_sysenter_return(%ebp),%ebp
11499 + movl %ebp,PT_EIP(%esp)
11500 ENABLE_INTERRUPTS(CLBR_NONE)
11501
11502 /*
11503 * Load the potential sixth argument from user stack.
11504 * Careful about security.
11505 */
11506 + movl PT_OLDESP(%esp),%ebp
11507 +
11508 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11509 + mov PT_OLDSS(%esp),%ds
11510 +1: movl %ds:(%ebp),%ebp
11511 + push %ss
11512 + pop %ds
11513 +#else
11514 cmpl $__PAGE_OFFSET-3,%ebp
11515 jae syscall_fault
11516 1: movl (%ebp),%ebp
11517 +#endif
11518 +
11519 movl %ebp,PT_EBP(%esp)
11520 .section __ex_table,"a"
11521 .align 4
11522 @@ -433,12 +594,23 @@ sysenter_do_call:
11523 testl $_TIF_ALLWORK_MASK, %ecx
11524 jne sysexit_audit
11525 sysenter_exit:
11526 +
11527 +#ifdef CONFIG_PAX_RANDKSTACK
11528 + pushl_cfi %eax
11529 + call pax_randomize_kstack
11530 + popl_cfi %eax
11531 +#endif
11532 +
11533 + pax_erase_kstack
11534 +
11535 /* if something modifies registers it must also disable sysexit */
11536 movl PT_EIP(%esp), %edx
11537 movl PT_OLDESP(%esp), %ecx
11538 xorl %ebp,%ebp
11539 TRACE_IRQS_ON
11540 1: mov PT_FS(%esp), %fs
11541 +2: mov PT_DS(%esp), %ds
11542 +3: mov PT_ES(%esp), %es
11543 PTGS_TO_GS
11544 ENABLE_INTERRUPTS_SYSEXIT
11545
11546 @@ -455,6 +627,9 @@ sysenter_audit:
11547 movl %eax,%edx /* 2nd arg: syscall number */
11548 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11549 call audit_syscall_entry
11550 +
11551 + pax_erase_kstack
11552 +
11553 pushl_cfi %ebx
11554 movl PT_EAX(%esp),%eax /* reload syscall number */
11555 jmp sysenter_do_call
11556 @@ -481,11 +656,17 @@ sysexit_audit:
11557
11558 CFI_ENDPROC
11559 .pushsection .fixup,"ax"
11560 -2: movl $0,PT_FS(%esp)
11561 +4: movl $0,PT_FS(%esp)
11562 + jmp 1b
11563 +5: movl $0,PT_DS(%esp)
11564 + jmp 1b
11565 +6: movl $0,PT_ES(%esp)
11566 jmp 1b
11567 .section __ex_table,"a"
11568 .align 4
11569 - .long 1b,2b
11570 + .long 1b,4b
11571 + .long 2b,5b
11572 + .long 3b,6b
11573 .popsection
11574 PTGS_TO_GS_EX
11575 ENDPROC(ia32_sysenter_target)
11576 @@ -518,6 +699,14 @@ syscall_exit:
11577 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11578 jne syscall_exit_work
11579
11580 +restore_all_pax:
11581 +
11582 +#ifdef CONFIG_PAX_RANDKSTACK
11583 + call pax_randomize_kstack
11584 +#endif
11585 +
11586 + pax_erase_kstack
11587 +
11588 restore_all:
11589 TRACE_IRQS_IRET
11590 restore_all_notrace:
11591 @@ -577,14 +766,21 @@ ldt_ss:
11592 * compensating for the offset by changing to the ESPFIX segment with
11593 * a base address that matches for the difference.
11594 */
11595 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11596 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11597 mov %esp, %edx /* load kernel esp */
11598 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11599 mov %dx, %ax /* eax: new kernel esp */
11600 sub %eax, %edx /* offset (low word is 0) */
11601 +#ifdef CONFIG_SMP
11602 + movl PER_CPU_VAR(cpu_number), %ebx
11603 + shll $PAGE_SHIFT_asm, %ebx
11604 + addl $cpu_gdt_table, %ebx
11605 +#else
11606 + movl $cpu_gdt_table, %ebx
11607 +#endif
11608 shr $16, %edx
11609 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11610 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11611 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11612 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11613 pushl_cfi $__ESPFIX_SS
11614 pushl_cfi %eax /* new kernel esp */
11615 /* Disable interrupts, but do not irqtrace this section: we
11616 @@ -613,29 +809,23 @@ work_resched:
11617 movl TI_flags(%ebp), %ecx
11618 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11619 # than syscall tracing?
11620 - jz restore_all
11621 + jz restore_all_pax
11622 testb $_TIF_NEED_RESCHED, %cl
11623 jnz work_resched
11624
11625 work_notifysig: # deal with pending signals and
11626 # notify-resume requests
11627 + movl %esp, %eax
11628 #ifdef CONFIG_VM86
11629 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11630 - movl %esp, %eax
11631 - jne work_notifysig_v86 # returning to kernel-space or
11632 + jz 1f # returning to kernel-space or
11633 # vm86-space
11634 - xorl %edx, %edx
11635 - call do_notify_resume
11636 - jmp resume_userspace_sig
11637
11638 - ALIGN
11639 -work_notifysig_v86:
11640 pushl_cfi %ecx # save ti_flags for do_notify_resume
11641 call save_v86_state # %eax contains pt_regs pointer
11642 popl_cfi %ecx
11643 movl %eax, %esp
11644 -#else
11645 - movl %esp, %eax
11646 +1:
11647 #endif
11648 xorl %edx, %edx
11649 call do_notify_resume
11650 @@ -648,6 +838,9 @@ syscall_trace_entry:
11651 movl $-ENOSYS,PT_EAX(%esp)
11652 movl %esp, %eax
11653 call syscall_trace_enter
11654 +
11655 + pax_erase_kstack
11656 +
11657 /* What it returned is what we'll actually use. */
11658 cmpl $(nr_syscalls), %eax
11659 jnae syscall_call
11660 @@ -670,6 +863,10 @@ END(syscall_exit_work)
11661
11662 RING0_INT_FRAME # can't unwind into user space anyway
11663 syscall_fault:
11664 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11665 + push %ss
11666 + pop %ds
11667 +#endif
11668 GET_THREAD_INFO(%ebp)
11669 movl $-EFAULT,PT_EAX(%esp)
11670 jmp resume_userspace
11671 @@ -752,6 +949,36 @@ ptregs_clone:
11672 CFI_ENDPROC
11673 ENDPROC(ptregs_clone)
11674
11675 + ALIGN;
11676 +ENTRY(kernel_execve)
11677 + CFI_STARTPROC
11678 + pushl_cfi %ebp
11679 + sub $PT_OLDSS+4,%esp
11680 + pushl_cfi %edi
11681 + pushl_cfi %ecx
11682 + pushl_cfi %eax
11683 + lea 3*4(%esp),%edi
11684 + mov $PT_OLDSS/4+1,%ecx
11685 + xorl %eax,%eax
11686 + rep stosl
11687 + popl_cfi %eax
11688 + popl_cfi %ecx
11689 + popl_cfi %edi
11690 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11691 + pushl_cfi %esp
11692 + call sys_execve
11693 + add $4,%esp
11694 + CFI_ADJUST_CFA_OFFSET -4
11695 + GET_THREAD_INFO(%ebp)
11696 + test %eax,%eax
11697 + jz syscall_exit
11698 + add $PT_OLDSS+4,%esp
11699 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11700 + popl_cfi %ebp
11701 + ret
11702 + CFI_ENDPROC
11703 +ENDPROC(kernel_execve)
11704 +
11705 .macro FIXUP_ESPFIX_STACK
11706 /*
11707 * Switch back for ESPFIX stack to the normal zerobased stack
11708 @@ -761,8 +988,15 @@ ENDPROC(ptregs_clone)
11709 * normal stack and adjusts ESP with the matching offset.
11710 */
11711 /* fixup the stack */
11712 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11713 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11714 +#ifdef CONFIG_SMP
11715 + movl PER_CPU_VAR(cpu_number), %ebx
11716 + shll $PAGE_SHIFT_asm, %ebx
11717 + addl $cpu_gdt_table, %ebx
11718 +#else
11719 + movl $cpu_gdt_table, %ebx
11720 +#endif
11721 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11722 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11723 shl $16, %eax
11724 addl %esp, %eax /* the adjusted stack pointer */
11725 pushl_cfi $__KERNEL_DS
11726 @@ -1213,7 +1447,6 @@ return_to_handler:
11727 jmp *%ecx
11728 #endif
11729
11730 -.section .rodata,"a"
11731 #include "syscall_table_32.S"
11732
11733 syscall_table_size=(.-sys_call_table)
11734 @@ -1259,9 +1492,12 @@ error_code:
11735 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11736 REG_TO_PTGS %ecx
11737 SET_KERNEL_GS %ecx
11738 - movl $(__USER_DS), %ecx
11739 + movl $(__KERNEL_DS), %ecx
11740 movl %ecx, %ds
11741 movl %ecx, %es
11742 +
11743 + pax_enter_kernel
11744 +
11745 TRACE_IRQS_OFF
11746 movl %esp,%eax # pt_regs pointer
11747 call *%edi
11748 @@ -1346,6 +1582,9 @@ nmi_stack_correct:
11749 xorl %edx,%edx # zero error code
11750 movl %esp,%eax # pt_regs pointer
11751 call do_nmi
11752 +
11753 + pax_exit_kernel
11754 +
11755 jmp restore_all_notrace
11756 CFI_ENDPROC
11757
11758 @@ -1382,6 +1621,9 @@ nmi_espfix_stack:
11759 FIXUP_ESPFIX_STACK # %eax == %esp
11760 xorl %edx,%edx # zero error code
11761 call do_nmi
11762 +
11763 + pax_exit_kernel
11764 +
11765 RESTORE_REGS
11766 lss 12+4(%esp), %esp # back to espfix stack
11767 CFI_ADJUST_CFA_OFFSET -24
11768 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_64.S linux-2.6.39.4/arch/x86/kernel/entry_64.S
11769 --- linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400
11770 +++ linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-08-05 19:44:33.000000000 -0400
11771 @@ -53,6 +53,7 @@
11772 #include <asm/paravirt.h>
11773 #include <asm/ftrace.h>
11774 #include <asm/percpu.h>
11775 +#include <asm/pgtable.h>
11776
11777 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11778 #include <linux/elf-em.h>
11779 @@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11780 ENDPROC(native_usergs_sysret64)
11781 #endif /* CONFIG_PARAVIRT */
11782
11783 + .macro ljmpq sel, off
11784 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11785 + .byte 0x48; ljmp *1234f(%rip)
11786 + .pushsection .rodata
11787 + .align 16
11788 + 1234: .quad \off; .word \sel
11789 + .popsection
11790 +#else
11791 + pushq $\sel
11792 + pushq $\off
11793 + lretq
11794 +#endif
11795 + .endm
11796 +
11797 + .macro pax_enter_kernel
11798 +#ifdef CONFIG_PAX_KERNEXEC
11799 + call pax_enter_kernel
11800 +#endif
11801 + .endm
11802 +
11803 + .macro pax_exit_kernel
11804 +#ifdef CONFIG_PAX_KERNEXEC
11805 + call pax_exit_kernel
11806 +#endif
11807 + .endm
11808 +
11809 +#ifdef CONFIG_PAX_KERNEXEC
11810 +ENTRY(pax_enter_kernel)
11811 + pushq %rdi
11812 +
11813 +#ifdef CONFIG_PARAVIRT
11814 + PV_SAVE_REGS(CLBR_RDI)
11815 +#endif
11816 +
11817 + GET_CR0_INTO_RDI
11818 + bts $16,%rdi
11819 + jnc 1f
11820 + mov %cs,%edi
11821 + cmp $__KERNEL_CS,%edi
11822 + jz 3f
11823 + ljmpq __KERNEL_CS,3f
11824 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11825 +2: SET_RDI_INTO_CR0
11826 +3:
11827 +
11828 +#ifdef CONFIG_PARAVIRT
11829 + PV_RESTORE_REGS(CLBR_RDI)
11830 +#endif
11831 +
11832 + popq %rdi
11833 + retq
11834 +ENDPROC(pax_enter_kernel)
11835 +
11836 +ENTRY(pax_exit_kernel)
11837 + pushq %rdi
11838 +
11839 +#ifdef CONFIG_PARAVIRT
11840 + PV_SAVE_REGS(CLBR_RDI)
11841 +#endif
11842 +
11843 + mov %cs,%rdi
11844 + cmp $__KERNEXEC_KERNEL_CS,%edi
11845 + jnz 2f
11846 + GET_CR0_INTO_RDI
11847 + btr $16,%rdi
11848 + ljmpq __KERNEL_CS,1f
11849 +1: SET_RDI_INTO_CR0
11850 +2:
11851 +
11852 +#ifdef CONFIG_PARAVIRT
11853 + PV_RESTORE_REGS(CLBR_RDI);
11854 +#endif
11855 +
11856 + popq %rdi
11857 + retq
11858 +ENDPROC(pax_exit_kernel)
11859 +#endif
11860 +
11861 + .macro pax_enter_kernel_user
11862 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11863 + call pax_enter_kernel_user
11864 +#endif
11865 + .endm
11866 +
11867 + .macro pax_exit_kernel_user
11868 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11869 + call pax_exit_kernel_user
11870 +#endif
11871 +#ifdef CONFIG_PAX_RANDKSTACK
11872 + push %rax
11873 + call pax_randomize_kstack
11874 + pop %rax
11875 +#endif
11876 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11877 + call pax_erase_kstack
11878 +#endif
11879 + .endm
11880 +
11881 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11882 +ENTRY(pax_enter_kernel_user)
11883 + pushq %rdi
11884 + pushq %rbx
11885 +
11886 +#ifdef CONFIG_PARAVIRT
11887 + PV_SAVE_REGS(CLBR_RDI)
11888 +#endif
11889 +
11890 + GET_CR3_INTO_RDI
11891 + mov %rdi,%rbx
11892 + add $__START_KERNEL_map,%rbx
11893 + sub phys_base(%rip),%rbx
11894 +
11895 +#ifdef CONFIG_PARAVIRT
11896 + pushq %rdi
11897 + cmpl $0, pv_info+PARAVIRT_enabled
11898 + jz 1f
11899 + i = 0
11900 + .rept USER_PGD_PTRS
11901 + mov i*8(%rbx),%rsi
11902 + mov $0,%sil
11903 + lea i*8(%rbx),%rdi
11904 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11905 + i = i + 1
11906 + .endr
11907 + jmp 2f
11908 +1:
11909 +#endif
11910 +
11911 + i = 0
11912 + .rept USER_PGD_PTRS
11913 + movb $0,i*8(%rbx)
11914 + i = i + 1
11915 + .endr
11916 +
11917 +#ifdef CONFIG_PARAVIRT
11918 +2: popq %rdi
11919 +#endif
11920 + SET_RDI_INTO_CR3
11921 +
11922 +#ifdef CONFIG_PAX_KERNEXEC
11923 + GET_CR0_INTO_RDI
11924 + bts $16,%rdi
11925 + SET_RDI_INTO_CR0
11926 +#endif
11927 +
11928 +#ifdef CONFIG_PARAVIRT
11929 + PV_RESTORE_REGS(CLBR_RDI)
11930 +#endif
11931 +
11932 + popq %rbx
11933 + popq %rdi
11934 + retq
11935 +ENDPROC(pax_enter_kernel_user)
11936 +
11937 +ENTRY(pax_exit_kernel_user)
11938 + push %rdi
11939 +
11940 +#ifdef CONFIG_PARAVIRT
11941 + pushq %rbx
11942 + PV_SAVE_REGS(CLBR_RDI)
11943 +#endif
11944 +
11945 +#ifdef CONFIG_PAX_KERNEXEC
11946 + GET_CR0_INTO_RDI
11947 + btr $16,%rdi
11948 + SET_RDI_INTO_CR0
11949 +#endif
11950 +
11951 + GET_CR3_INTO_RDI
11952 + add $__START_KERNEL_map,%rdi
11953 + sub phys_base(%rip),%rdi
11954 +
11955 +#ifdef CONFIG_PARAVIRT
11956 + cmpl $0, pv_info+PARAVIRT_enabled
11957 + jz 1f
11958 + mov %rdi,%rbx
11959 + i = 0
11960 + .rept USER_PGD_PTRS
11961 + mov i*8(%rbx),%rsi
11962 + mov $0x67,%sil
11963 + lea i*8(%rbx),%rdi
11964 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11965 + i = i + 1
11966 + .endr
11967 + jmp 2f
11968 +1:
11969 +#endif
11970 +
11971 + i = 0
11972 + .rept USER_PGD_PTRS
11973 + movb $0x67,i*8(%rdi)
11974 + i = i + 1
11975 + .endr
11976 +
11977 +#ifdef CONFIG_PARAVIRT
11978 +2: PV_RESTORE_REGS(CLBR_RDI)
11979 + popq %rbx
11980 +#endif
11981 +
11982 + popq %rdi
11983 + retq
11984 +ENDPROC(pax_exit_kernel_user)
11985 +#endif
11986 +
11987 + .macro pax_erase_kstack
11988 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11989 + call pax_erase_kstack
11990 +#endif
11991 + .endm
11992 +
11993 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11994 +/*
11995 + * r10: thread_info
11996 + * rcx, rdx: can be clobbered
11997 + */
11998 +ENTRY(pax_erase_kstack)
11999 + pushq %rdi
12000 + pushq %rax
12001 +
12002 + GET_THREAD_INFO(%r10)
12003 + mov TI_lowest_stack(%r10), %rdi
12004 + mov $-0xBEEF, %rax
12005 + std
12006 +
12007 +1: mov %edi, %ecx
12008 + and $THREAD_SIZE_asm - 1, %ecx
12009 + shr $3, %ecx
12010 + repne scasq
12011 + jecxz 2f
12012 +
12013 + cmp $2*8, %ecx
12014 + jc 2f
12015 +
12016 + mov $2*8, %ecx
12017 + repe scasq
12018 + jecxz 2f
12019 + jne 1b
12020 +
12021 +2: cld
12022 + mov %esp, %ecx
12023 + sub %edi, %ecx
12024 + shr $3, %ecx
12025 + rep stosq
12026 +
12027 + mov TI_task_thread_sp0(%r10), %rdi
12028 + sub $256, %rdi
12029 + mov %rdi, TI_lowest_stack(%r10)
12030 +
12031 + popq %rax
12032 + popq %rdi
12033 + ret
12034 +ENDPROC(pax_erase_kstack)
12035 +#endif
12036
12037 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12038 #ifdef CONFIG_TRACE_IRQFLAGS
12039 @@ -318,7 +572,7 @@ ENTRY(save_args)
12040 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12041 movq_cfi rbp, 8 /* push %rbp */
12042 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12043 - testl $3, CS(%rdi)
12044 + testb $3, CS(%rdi)
12045 je 1f
12046 SWAPGS
12047 /*
12048 @@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12049
12050 RESTORE_REST
12051
12052 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12053 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12054 je int_ret_from_sys_call
12055
12056 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12057 @@ -455,7 +709,7 @@ END(ret_from_fork)
12058 ENTRY(system_call)
12059 CFI_STARTPROC simple
12060 CFI_SIGNAL_FRAME
12061 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12062 + CFI_DEF_CFA rsp,0
12063 CFI_REGISTER rip,rcx
12064 /*CFI_REGISTER rflags,r11*/
12065 SWAPGS_UNSAFE_STACK
12066 @@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12067
12068 movq %rsp,PER_CPU_VAR(old_rsp)
12069 movq PER_CPU_VAR(kernel_stack),%rsp
12070 + pax_enter_kernel_user
12071 /*
12072 * No need to follow this irqs off/on section - it's straight
12073 * and short:
12074 */
12075 ENABLE_INTERRUPTS(CLBR_NONE)
12076 - SAVE_ARGS 8,1
12077 + SAVE_ARGS 8*6,1
12078 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12079 movq %rcx,RIP-ARGOFFSET(%rsp)
12080 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12081 @@ -502,6 +757,7 @@ sysret_check:
12082 andl %edi,%edx
12083 jnz sysret_careful
12084 CFI_REMEMBER_STATE
12085 + pax_exit_kernel_user
12086 /*
12087 * sysretq will re-enable interrupts:
12088 */
12089 @@ -560,6 +816,9 @@ auditsys:
12090 movq %rax,%rsi /* 2nd arg: syscall number */
12091 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12092 call audit_syscall_entry
12093 +
12094 + pax_erase_kstack
12095 +
12096 LOAD_ARGS 0 /* reload call-clobbered registers */
12097 jmp system_call_fastpath
12098
12099 @@ -590,6 +849,9 @@ tracesys:
12100 FIXUP_TOP_OF_STACK %rdi
12101 movq %rsp,%rdi
12102 call syscall_trace_enter
12103 +
12104 + pax_erase_kstack
12105 +
12106 /*
12107 * Reload arg registers from stack in case ptrace changed them.
12108 * We don't reload %rax because syscall_trace_enter() returned
12109 @@ -611,7 +873,7 @@ tracesys:
12110 GLOBAL(int_ret_from_sys_call)
12111 DISABLE_INTERRUPTS(CLBR_NONE)
12112 TRACE_IRQS_OFF
12113 - testl $3,CS-ARGOFFSET(%rsp)
12114 + testb $3,CS-ARGOFFSET(%rsp)
12115 je retint_restore_args
12116 movl $_TIF_ALLWORK_MASK,%edi
12117 /* edi: mask to check */
12118 @@ -793,6 +1055,16 @@ END(interrupt)
12119 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12120 call save_args
12121 PARTIAL_FRAME 0
12122 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12123 + testb $3, CS(%rdi)
12124 + jnz 1f
12125 + pax_enter_kernel
12126 + jmp 2f
12127 +1: pax_enter_kernel_user
12128 +2:
12129 +#else
12130 + pax_enter_kernel
12131 +#endif
12132 call \func
12133 .endm
12134
12135 @@ -825,7 +1097,7 @@ ret_from_intr:
12136 CFI_ADJUST_CFA_OFFSET -8
12137 exit_intr:
12138 GET_THREAD_INFO(%rcx)
12139 - testl $3,CS-ARGOFFSET(%rsp)
12140 + testb $3,CS-ARGOFFSET(%rsp)
12141 je retint_kernel
12142
12143 /* Interrupt came from user space */
12144 @@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12145 * The iretq could re-enable interrupts:
12146 */
12147 DISABLE_INTERRUPTS(CLBR_ANY)
12148 + pax_exit_kernel_user
12149 TRACE_IRQS_IRETQ
12150 SWAPGS
12151 jmp restore_args
12152
12153 retint_restore_args: /* return to kernel space */
12154 DISABLE_INTERRUPTS(CLBR_ANY)
12155 + pax_exit_kernel
12156 /*
12157 * The iretq could re-enable interrupts:
12158 */
12159 @@ -1027,6 +1301,16 @@ ENTRY(\sym)
12160 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12161 call error_entry
12162 DEFAULT_FRAME 0
12163 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12164 + testb $3, CS(%rsp)
12165 + jnz 1f
12166 + pax_enter_kernel
12167 + jmp 2f
12168 +1: pax_enter_kernel_user
12169 +2:
12170 +#else
12171 + pax_enter_kernel
12172 +#endif
12173 movq %rsp,%rdi /* pt_regs pointer */
12174 xorl %esi,%esi /* no error code */
12175 call \do_sym
12176 @@ -1044,6 +1328,16 @@ ENTRY(\sym)
12177 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12178 call save_paranoid
12179 TRACE_IRQS_OFF
12180 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12181 + testb $3, CS(%rsp)
12182 + jnz 1f
12183 + pax_enter_kernel
12184 + jmp 2f
12185 +1: pax_enter_kernel_user
12186 +2:
12187 +#else
12188 + pax_enter_kernel
12189 +#endif
12190 movq %rsp,%rdi /* pt_regs pointer */
12191 xorl %esi,%esi /* no error code */
12192 call \do_sym
12193 @@ -1052,7 +1346,7 @@ ENTRY(\sym)
12194 END(\sym)
12195 .endm
12196
12197 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12198 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12199 .macro paranoidzeroentry_ist sym do_sym ist
12200 ENTRY(\sym)
12201 INTR_FRAME
12202 @@ -1062,8 +1356,24 @@ ENTRY(\sym)
12203 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12204 call save_paranoid
12205 TRACE_IRQS_OFF
12206 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12207 + testb $3, CS(%rsp)
12208 + jnz 1f
12209 + pax_enter_kernel
12210 + jmp 2f
12211 +1: pax_enter_kernel_user
12212 +2:
12213 +#else
12214 + pax_enter_kernel
12215 +#endif
12216 movq %rsp,%rdi /* pt_regs pointer */
12217 xorl %esi,%esi /* no error code */
12218 +#ifdef CONFIG_SMP
12219 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12220 + lea init_tss(%r12), %r12
12221 +#else
12222 + lea init_tss(%rip), %r12
12223 +#endif
12224 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12225 call \do_sym
12226 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12227 @@ -1080,6 +1390,16 @@ ENTRY(\sym)
12228 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12229 call error_entry
12230 DEFAULT_FRAME 0
12231 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12232 + testb $3, CS(%rsp)
12233 + jnz 1f
12234 + pax_enter_kernel
12235 + jmp 2f
12236 +1: pax_enter_kernel_user
12237 +2:
12238 +#else
12239 + pax_enter_kernel
12240 +#endif
12241 movq %rsp,%rdi /* pt_regs pointer */
12242 movq ORIG_RAX(%rsp),%rsi /* get error code */
12243 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12244 @@ -1099,6 +1419,16 @@ ENTRY(\sym)
12245 call save_paranoid
12246 DEFAULT_FRAME 0
12247 TRACE_IRQS_OFF
12248 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12249 + testb $3, CS(%rsp)
12250 + jnz 1f
12251 + pax_enter_kernel
12252 + jmp 2f
12253 +1: pax_enter_kernel_user
12254 +2:
12255 +#else
12256 + pax_enter_kernel
12257 +#endif
12258 movq %rsp,%rdi /* pt_regs pointer */
12259 movq ORIG_RAX(%rsp),%rsi /* get error code */
12260 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12261 @@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12262 TRACE_IRQS_OFF
12263 testl %ebx,%ebx /* swapgs needed? */
12264 jnz paranoid_restore
12265 - testl $3,CS(%rsp)
12266 + testb $3,CS(%rsp)
12267 jnz paranoid_userspace
12268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12269 + pax_exit_kernel
12270 + TRACE_IRQS_IRETQ 0
12271 + SWAPGS_UNSAFE_STACK
12272 + RESTORE_ALL 8
12273 + jmp irq_return
12274 +#endif
12275 paranoid_swapgs:
12276 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12277 + pax_exit_kernel_user
12278 +#else
12279 + pax_exit_kernel
12280 +#endif
12281 TRACE_IRQS_IRETQ 0
12282 SWAPGS_UNSAFE_STACK
12283 RESTORE_ALL 8
12284 jmp irq_return
12285 paranoid_restore:
12286 + pax_exit_kernel
12287 TRACE_IRQS_IRETQ 0
12288 RESTORE_ALL 8
12289 jmp irq_return
12290 @@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12291 movq_cfi r14, R14+8
12292 movq_cfi r15, R15+8
12293 xorl %ebx,%ebx
12294 - testl $3,CS+8(%rsp)
12295 + testb $3,CS+8(%rsp)
12296 je error_kernelspace
12297 error_swapgs:
12298 SWAPGS
12299 @@ -1490,6 +1833,16 @@ ENTRY(nmi)
12300 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12301 call save_paranoid
12302 DEFAULT_FRAME 0
12303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12304 + testb $3, CS(%rsp)
12305 + jnz 1f
12306 + pax_enter_kernel
12307 + jmp 2f
12308 +1: pax_enter_kernel_user
12309 +2:
12310 +#else
12311 + pax_enter_kernel
12312 +#endif
12313 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12314 movq %rsp,%rdi
12315 movq $-1,%rsi
12316 @@ -1500,11 +1853,25 @@ ENTRY(nmi)
12317 DISABLE_INTERRUPTS(CLBR_NONE)
12318 testl %ebx,%ebx /* swapgs needed? */
12319 jnz nmi_restore
12320 - testl $3,CS(%rsp)
12321 + testb $3,CS(%rsp)
12322 jnz nmi_userspace
12323 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12324 + pax_exit_kernel
12325 + SWAPGS_UNSAFE_STACK
12326 + RESTORE_ALL 8
12327 + jmp irq_return
12328 +#endif
12329 nmi_swapgs:
12330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12331 + pax_exit_kernel_user
12332 +#else
12333 + pax_exit_kernel
12334 +#endif
12335 SWAPGS_UNSAFE_STACK
12336 + RESTORE_ALL 8
12337 + jmp irq_return
12338 nmi_restore:
12339 + pax_exit_kernel
12340 RESTORE_ALL 8
12341 jmp irq_return
12342 nmi_userspace:
12343 diff -urNp linux-2.6.39.4/arch/x86/kernel/ftrace.c linux-2.6.39.4/arch/x86/kernel/ftrace.c
12344 --- linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400
12345 +++ linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-08-05 19:44:33.000000000 -0400
12346 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12347 static void *mod_code_newcode; /* holds the text to write to the IP */
12348
12349 static unsigned nmi_wait_count;
12350 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12351 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12352
12353 int ftrace_arch_read_dyn_info(char *buf, int size)
12354 {
12355 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12356
12357 r = snprintf(buf, size, "%u %u",
12358 nmi_wait_count,
12359 - atomic_read(&nmi_update_count));
12360 + atomic_read_unchecked(&nmi_update_count));
12361 return r;
12362 }
12363
12364 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12365
12366 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12367 smp_rmb();
12368 + pax_open_kernel();
12369 ftrace_mod_code();
12370 - atomic_inc(&nmi_update_count);
12371 + pax_close_kernel();
12372 + atomic_inc_unchecked(&nmi_update_count);
12373 }
12374 /* Must have previous changes seen before executions */
12375 smp_mb();
12376 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12377 {
12378 unsigned char replaced[MCOUNT_INSN_SIZE];
12379
12380 + ip = ktla_ktva(ip);
12381 +
12382 /*
12383 * Note: Due to modules and __init, code can
12384 * disappear and change, we need to protect against faulting
12385 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12386 unsigned char old[MCOUNT_INSN_SIZE], *new;
12387 int ret;
12388
12389 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12390 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12391 new = ftrace_call_replace(ip, (unsigned long)func);
12392 ret = ftrace_modify_code(ip, old, new);
12393
12394 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12395 {
12396 unsigned char code[MCOUNT_INSN_SIZE];
12397
12398 + ip = ktla_ktva(ip);
12399 +
12400 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12401 return -EFAULT;
12402
12403 diff -urNp linux-2.6.39.4/arch/x86/kernel/head32.c linux-2.6.39.4/arch/x86/kernel/head32.c
12404 --- linux-2.6.39.4/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400
12405 +++ linux-2.6.39.4/arch/x86/kernel/head32.c 2011-08-05 19:44:33.000000000 -0400
12406 @@ -19,6 +19,7 @@
12407 #include <asm/io_apic.h>
12408 #include <asm/bios_ebda.h>
12409 #include <asm/tlbflush.h>
12410 +#include <asm/boot.h>
12411
12412 static void __init i386_default_early_setup(void)
12413 {
12414 @@ -34,7 +35,7 @@ void __init i386_start_kernel(void)
12415 {
12416 memblock_init();
12417
12418 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12419 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12420
12421 #ifdef CONFIG_BLK_DEV_INITRD
12422 /* Reserve INITRD */
12423 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_32.S linux-2.6.39.4/arch/x86/kernel/head_32.S
12424 --- linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400
12425 +++ linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-08-05 19:44:33.000000000 -0400
12426 @@ -25,6 +25,12 @@
12427 /* Physical address */
12428 #define pa(X) ((X) - __PAGE_OFFSET)
12429
12430 +#ifdef CONFIG_PAX_KERNEXEC
12431 +#define ta(X) (X)
12432 +#else
12433 +#define ta(X) ((X) - __PAGE_OFFSET)
12434 +#endif
12435 +
12436 /*
12437 * References to members of the new_cpu_data structure.
12438 */
12439 @@ -54,11 +60,7 @@
12440 * and small than max_low_pfn, otherwise will waste some page table entries
12441 */
12442
12443 -#if PTRS_PER_PMD > 1
12444 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12445 -#else
12446 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12447 -#endif
12448 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12449
12450 /* Number of possible pages in the lowmem region */
12451 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12452 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12453 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12454
12455 /*
12456 + * Real beginning of normal "text" segment
12457 + */
12458 +ENTRY(stext)
12459 +ENTRY(_stext)
12460 +
12461 +/*
12462 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12463 * %esi points to the real-mode code as a 32-bit pointer.
12464 * CS and DS must be 4 GB flat segments, but we don't depend on
12465 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12466 * can.
12467 */
12468 __HEAD
12469 +
12470 +#ifdef CONFIG_PAX_KERNEXEC
12471 + jmp startup_32
12472 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12473 +.fill PAGE_SIZE-5,1,0xcc
12474 +#endif
12475 +
12476 ENTRY(startup_32)
12477 movl pa(stack_start),%ecx
12478
12479 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12480 2:
12481 leal -__PAGE_OFFSET(%ecx),%esp
12482
12483 +#ifdef CONFIG_SMP
12484 + movl $pa(cpu_gdt_table),%edi
12485 + movl $__per_cpu_load,%eax
12486 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12487 + rorl $16,%eax
12488 + movb %al,__KERNEL_PERCPU + 4(%edi)
12489 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12490 + movl $__per_cpu_end - 1,%eax
12491 + subl $__per_cpu_start,%eax
12492 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12493 +#endif
12494 +
12495 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12496 + movl $NR_CPUS,%ecx
12497 + movl $pa(cpu_gdt_table),%edi
12498 +1:
12499 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12500 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12501 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12502 + addl $PAGE_SIZE_asm,%edi
12503 + loop 1b
12504 +#endif
12505 +
12506 +#ifdef CONFIG_PAX_KERNEXEC
12507 + movl $pa(boot_gdt),%edi
12508 + movl $__LOAD_PHYSICAL_ADDR,%eax
12509 + movw %ax,__BOOT_CS + 2(%edi)
12510 + rorl $16,%eax
12511 + movb %al,__BOOT_CS + 4(%edi)
12512 + movb %ah,__BOOT_CS + 7(%edi)
12513 + rorl $16,%eax
12514 +
12515 + ljmp $(__BOOT_CS),$1f
12516 +1:
12517 +
12518 + movl $NR_CPUS,%ecx
12519 + movl $pa(cpu_gdt_table),%edi
12520 + addl $__PAGE_OFFSET,%eax
12521 +1:
12522 + movw %ax,__KERNEL_CS + 2(%edi)
12523 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12524 + rorl $16,%eax
12525 + movb %al,__KERNEL_CS + 4(%edi)
12526 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12527 + movb %ah,__KERNEL_CS + 7(%edi)
12528 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12529 + rorl $16,%eax
12530 + addl $PAGE_SIZE_asm,%edi
12531 + loop 1b
12532 +#endif
12533 +
12534 /*
12535 * Clear BSS first so that there are no surprises...
12536 */
12537 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12538 movl %eax, pa(max_pfn_mapped)
12539
12540 /* Do early initialization of the fixmap area */
12541 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12542 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12543 +#ifdef CONFIG_COMPAT_VDSO
12544 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12545 +#else
12546 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12547 +#endif
12548 #else /* Not PAE */
12549
12550 page_pde_offset = (__PAGE_OFFSET >> 20);
12551 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12552 movl %eax, pa(max_pfn_mapped)
12553
12554 /* Do early initialization of the fixmap area */
12555 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12556 - movl %eax,pa(initial_page_table+0xffc)
12557 +#ifdef CONFIG_COMPAT_VDSO
12558 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12559 +#else
12560 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12561 +#endif
12562 #endif
12563
12564 #ifdef CONFIG_PARAVIRT
12565 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12566 cmpl $num_subarch_entries, %eax
12567 jae bad_subarch
12568
12569 - movl pa(subarch_entries)(,%eax,4), %eax
12570 - subl $__PAGE_OFFSET, %eax
12571 - jmp *%eax
12572 + jmp *pa(subarch_entries)(,%eax,4)
12573
12574 bad_subarch:
12575 WEAK(lguest_entry)
12576 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12577 __INITDATA
12578
12579 subarch_entries:
12580 - .long default_entry /* normal x86/PC */
12581 - .long lguest_entry /* lguest hypervisor */
12582 - .long xen_entry /* Xen hypervisor */
12583 - .long default_entry /* Moorestown MID */
12584 + .long ta(default_entry) /* normal x86/PC */
12585 + .long ta(lguest_entry) /* lguest hypervisor */
12586 + .long ta(xen_entry) /* Xen hypervisor */
12587 + .long ta(default_entry) /* Moorestown MID */
12588 num_subarch_entries = (. - subarch_entries) / 4
12589 .previous
12590 #else
12591 @@ -312,6 +382,7 @@ default_entry:
12592 orl %edx,%eax
12593 movl %eax,%cr4
12594
12595 +#ifdef CONFIG_X86_PAE
12596 testb $X86_CR4_PAE, %al # check if PAE is enabled
12597 jz 6f
12598
12599 @@ -340,6 +411,9 @@ default_entry:
12600 /* Make changes effective */
12601 wrmsr
12602
12603 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12604 +#endif
12605 +
12606 6:
12607
12608 /*
12609 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12610 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12611 movl %eax,%ss # after changing gdt.
12612
12613 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12614 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12615 movl %eax,%ds
12616 movl %eax,%es
12617
12618 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12619 */
12620 cmpb $0,ready
12621 jne 1f
12622 - movl $gdt_page,%eax
12623 + movl $cpu_gdt_table,%eax
12624 movl $stack_canary,%ecx
12625 +#ifdef CONFIG_SMP
12626 + addl $__per_cpu_load,%ecx
12627 +#endif
12628 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12629 shrl $16, %ecx
12630 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12631 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12632 1:
12633 -#endif
12634 movl $(__KERNEL_STACK_CANARY),%eax
12635 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12636 + movl $(__USER_DS),%eax
12637 +#else
12638 + xorl %eax,%eax
12639 +#endif
12640 movl %eax,%gs
12641
12642 xorl %eax,%eax # Clear LDT
12643 @@ -558,22 +639,22 @@ early_page_fault:
12644 jmp early_fault
12645
12646 early_fault:
12647 - cld
12648 #ifdef CONFIG_PRINTK
12649 + cmpl $1,%ss:early_recursion_flag
12650 + je hlt_loop
12651 + incl %ss:early_recursion_flag
12652 + cld
12653 pusha
12654 movl $(__KERNEL_DS),%eax
12655 movl %eax,%ds
12656 movl %eax,%es
12657 - cmpl $2,early_recursion_flag
12658 - je hlt_loop
12659 - incl early_recursion_flag
12660 movl %cr2,%eax
12661 pushl %eax
12662 pushl %edx /* trapno */
12663 pushl $fault_msg
12664 call printk
12665 +; call dump_stack
12666 #endif
12667 - call dump_stack
12668 hlt_loop:
12669 hlt
12670 jmp hlt_loop
12671 @@ -581,8 +662,11 @@ hlt_loop:
12672 /* This is the default interrupt "handler" :-) */
12673 ALIGN
12674 ignore_int:
12675 - cld
12676 #ifdef CONFIG_PRINTK
12677 + cmpl $2,%ss:early_recursion_flag
12678 + je hlt_loop
12679 + incl %ss:early_recursion_flag
12680 + cld
12681 pushl %eax
12682 pushl %ecx
12683 pushl %edx
12684 @@ -591,9 +675,6 @@ ignore_int:
12685 movl $(__KERNEL_DS),%eax
12686 movl %eax,%ds
12687 movl %eax,%es
12688 - cmpl $2,early_recursion_flag
12689 - je hlt_loop
12690 - incl early_recursion_flag
12691 pushl 16(%esp)
12692 pushl 24(%esp)
12693 pushl 32(%esp)
12694 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12695 /*
12696 * BSS section
12697 */
12698 -__PAGE_ALIGNED_BSS
12699 - .align PAGE_SIZE
12700 #ifdef CONFIG_X86_PAE
12701 +.section .initial_pg_pmd,"a",@progbits
12702 initial_pg_pmd:
12703 .fill 1024*KPMDS,4,0
12704 #else
12705 +.section .initial_page_table,"a",@progbits
12706 ENTRY(initial_page_table)
12707 .fill 1024,4,0
12708 #endif
12709 +.section .initial_pg_fixmap,"a",@progbits
12710 initial_pg_fixmap:
12711 .fill 1024,4,0
12712 +.section .empty_zero_page,"a",@progbits
12713 ENTRY(empty_zero_page)
12714 .fill 4096,1,0
12715 +.section .swapper_pg_dir,"a",@progbits
12716 ENTRY(swapper_pg_dir)
12717 +#ifdef CONFIG_X86_PAE
12718 + .fill 4,8,0
12719 +#else
12720 .fill 1024,4,0
12721 +#endif
12722 +
12723 +/*
12724 + * The IDT has to be page-aligned to simplify the Pentium
12725 + * F0 0F bug workaround.. We have a special link segment
12726 + * for this.
12727 + */
12728 +.section .idt,"a",@progbits
12729 +ENTRY(idt_table)
12730 + .fill 256,8,0
12731
12732 /*
12733 * This starts the data section.
12734 */
12735 #ifdef CONFIG_X86_PAE
12736 -__PAGE_ALIGNED_DATA
12737 - /* Page-aligned for the benefit of paravirt? */
12738 - .align PAGE_SIZE
12739 +.section .initial_page_table,"a",@progbits
12740 ENTRY(initial_page_table)
12741 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12742 # if KPMDS == 3
12743 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12744 # error "Kernel PMDs should be 1, 2 or 3"
12745 # endif
12746 .align PAGE_SIZE /* needs to be page-sized too */
12747 +
12748 +#ifdef CONFIG_PAX_PER_CPU_PGD
12749 +ENTRY(cpu_pgd)
12750 + .rept NR_CPUS
12751 + .fill 4,8,0
12752 + .endr
12753 +#endif
12754 +
12755 #endif
12756
12757 .data
12758 .balign 4
12759 ENTRY(stack_start)
12760 - .long init_thread_union+THREAD_SIZE
12761 + .long init_thread_union+THREAD_SIZE-8
12762 +
12763 +ready: .byte 0
12764
12765 +.section .rodata,"a",@progbits
12766 early_recursion_flag:
12767 .long 0
12768
12769 -ready: .byte 0
12770 -
12771 int_msg:
12772 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12773
12774 @@ -707,7 +811,7 @@ fault_msg:
12775 .word 0 # 32 bit align gdt_desc.address
12776 boot_gdt_descr:
12777 .word __BOOT_DS+7
12778 - .long boot_gdt - __PAGE_OFFSET
12779 + .long pa(boot_gdt)
12780
12781 .word 0 # 32-bit align idt_desc.address
12782 idt_descr:
12783 @@ -718,7 +822,7 @@ idt_descr:
12784 .word 0 # 32 bit align gdt_desc.address
12785 ENTRY(early_gdt_descr)
12786 .word GDT_ENTRIES*8-1
12787 - .long gdt_page /* Overwritten for secondary CPUs */
12788 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12789
12790 /*
12791 * The boot_gdt must mirror the equivalent in setup.S and is
12792 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12793 .align L1_CACHE_BYTES
12794 ENTRY(boot_gdt)
12795 .fill GDT_ENTRY_BOOT_CS,8,0
12796 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12797 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12798 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12799 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12800 +
12801 + .align PAGE_SIZE_asm
12802 +ENTRY(cpu_gdt_table)
12803 + .rept NR_CPUS
12804 + .quad 0x0000000000000000 /* NULL descriptor */
12805 + .quad 0x0000000000000000 /* 0x0b reserved */
12806 + .quad 0x0000000000000000 /* 0x13 reserved */
12807 + .quad 0x0000000000000000 /* 0x1b reserved */
12808 +
12809 +#ifdef CONFIG_PAX_KERNEXEC
12810 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12811 +#else
12812 + .quad 0x0000000000000000 /* 0x20 unused */
12813 +#endif
12814 +
12815 + .quad 0x0000000000000000 /* 0x28 unused */
12816 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12817 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12818 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12819 + .quad 0x0000000000000000 /* 0x4b reserved */
12820 + .quad 0x0000000000000000 /* 0x53 reserved */
12821 + .quad 0x0000000000000000 /* 0x5b reserved */
12822 +
12823 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12824 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12825 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12826 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12827 +
12828 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12829 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12830 +
12831 + /*
12832 + * Segments used for calling PnP BIOS have byte granularity.
12833 + * The code segments and data segments have fixed 64k limits,
12834 + * the transfer segment sizes are set at run time.
12835 + */
12836 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12837 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12838 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12839 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12840 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12841 +
12842 + /*
12843 + * The APM segments have byte granularity and their bases
12844 + * are set at run time. All have 64k limits.
12845 + */
12846 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12847 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12848 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12849 +
12850 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12851 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12852 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12853 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12854 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12855 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12856 +
12857 + /* Be sure this is zeroed to avoid false validations in Xen */
12858 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12859 + .endr
12860 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_64.S linux-2.6.39.4/arch/x86/kernel/head_64.S
12861 --- linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400
12862 +++ linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-08-05 19:44:33.000000000 -0400
12863 @@ -19,6 +19,7 @@
12864 #include <asm/cache.h>
12865 #include <asm/processor-flags.h>
12866 #include <asm/percpu.h>
12867 +#include <asm/cpufeature.h>
12868
12869 #ifdef CONFIG_PARAVIRT
12870 #include <asm/asm-offsets.h>
12871 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12872 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12873 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12874 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12875 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12876 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12877 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12878 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12879
12880 .text
12881 __HEAD
12882 @@ -85,35 +90,22 @@ startup_64:
12883 */
12884 addq %rbp, init_level4_pgt + 0(%rip)
12885 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12886 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12887 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12888 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12889
12890 addq %rbp, level3_ident_pgt + 0(%rip)
12891 +#ifndef CONFIG_XEN
12892 + addq %rbp, level3_ident_pgt + 8(%rip)
12893 +#endif
12894
12895 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12896 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12897 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12898
12899 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12900 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12901 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12902
12903 - /* Add an Identity mapping if I am above 1G */
12904 - leaq _text(%rip), %rdi
12905 - andq $PMD_PAGE_MASK, %rdi
12906 -
12907 - movq %rdi, %rax
12908 - shrq $PUD_SHIFT, %rax
12909 - andq $(PTRS_PER_PUD - 1), %rax
12910 - jz ident_complete
12911 -
12912 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12913 - leaq level3_ident_pgt(%rip), %rbx
12914 - movq %rdx, 0(%rbx, %rax, 8)
12915 -
12916 - movq %rdi, %rax
12917 - shrq $PMD_SHIFT, %rax
12918 - andq $(PTRS_PER_PMD - 1), %rax
12919 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12920 - leaq level2_spare_pgt(%rip), %rbx
12921 - movq %rdx, 0(%rbx, %rax, 8)
12922 -ident_complete:
12923 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12924 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12925
12926 /*
12927 * Fixup the kernel text+data virtual addresses. Note that
12928 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12929 * after the boot processor executes this code.
12930 */
12931
12932 - /* Enable PAE mode and PGE */
12933 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12934 + /* Enable PAE mode and PSE/PGE */
12935 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12936 movq %rax, %cr4
12937
12938 /* Setup early boot stage 4 level pagetables. */
12939 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12940 movl $MSR_EFER, %ecx
12941 rdmsr
12942 btsl $_EFER_SCE, %eax /* Enable System Call */
12943 - btl $20,%edi /* No Execute supported? */
12944 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12945 jnc 1f
12946 btsl $_EFER_NX, %eax
12947 + leaq init_level4_pgt(%rip), %rdi
12948 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12949 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12950 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12951 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12952 1: wrmsr /* Make changes effective */
12953
12954 /* Setup cr0 */
12955 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12956 bad_address:
12957 jmp bad_address
12958
12959 - .section ".init.text","ax"
12960 + __INIT
12961 #ifdef CONFIG_EARLY_PRINTK
12962 .globl early_idt_handlers
12963 early_idt_handlers:
12964 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
12965 #endif /* EARLY_PRINTK */
12966 1: hlt
12967 jmp 1b
12968 + .previous
12969
12970 #ifdef CONFIG_EARLY_PRINTK
12971 + __INITDATA
12972 early_recursion_flag:
12973 .long 0
12974 + .previous
12975
12976 + .section .rodata,"a",@progbits
12977 early_idt_msg:
12978 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12979 early_idt_ripmsg:
12980 .asciz "RIP %s\n"
12981 -#endif /* CONFIG_EARLY_PRINTK */
12982 .previous
12983 +#endif /* CONFIG_EARLY_PRINTK */
12984
12985 + .section .rodata,"a",@progbits
12986 #define NEXT_PAGE(name) \
12987 .balign PAGE_SIZE; \
12988 ENTRY(name)
12989 @@ -338,7 +340,6 @@ ENTRY(name)
12990 i = i + 1 ; \
12991 .endr
12992
12993 - .data
12994 /*
12995 * This default setting generates an ident mapping at address 0x100000
12996 * and a mapping for the kernel that precisely maps virtual address
12997 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
12998 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12999 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13000 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13001 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
13002 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13003 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13004 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13005 .org init_level4_pgt + L4_START_KERNEL*8, 0
13006 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13007 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13008
13009 +#ifdef CONFIG_PAX_PER_CPU_PGD
13010 +NEXT_PAGE(cpu_pgd)
13011 + .rept NR_CPUS
13012 + .fill 512,8,0
13013 + .endr
13014 +#endif
13015 +
13016 NEXT_PAGE(level3_ident_pgt)
13017 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13018 +#ifdef CONFIG_XEN
13019 .fill 511,8,0
13020 +#else
13021 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13022 + .fill 510,8,0
13023 +#endif
13024 +
13025 +NEXT_PAGE(level3_vmalloc_pgt)
13026 + .fill 512,8,0
13027 +
13028 +NEXT_PAGE(level3_vmemmap_pgt)
13029 + .fill L3_VMEMMAP_START,8,0
13030 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13031
13032 NEXT_PAGE(level3_kernel_pgt)
13033 .fill L3_START_KERNEL,8,0
13034 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13035 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13036 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13037
13038 +NEXT_PAGE(level2_vmemmap_pgt)
13039 + .fill 512,8,0
13040 +
13041 NEXT_PAGE(level2_fixmap_pgt)
13042 - .fill 506,8,0
13043 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13044 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13045 - .fill 5,8,0
13046 + .fill 507,8,0
13047 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13048 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13049 + .fill 4,8,0
13050
13051 -NEXT_PAGE(level1_fixmap_pgt)
13052 +NEXT_PAGE(level1_vsyscall_pgt)
13053 .fill 512,8,0
13054
13055 -NEXT_PAGE(level2_ident_pgt)
13056 - /* Since I easily can, map the first 1G.
13057 + /* Since I easily can, map the first 2G.
13058 * Don't set NX because code runs from these pages.
13059 */
13060 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13061 +NEXT_PAGE(level2_ident_pgt)
13062 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13063
13064 NEXT_PAGE(level2_kernel_pgt)
13065 /*
13066 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13067 * If you want to increase this then increase MODULES_VADDR
13068 * too.)
13069 */
13070 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13071 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13072 -
13073 -NEXT_PAGE(level2_spare_pgt)
13074 - .fill 512, 8, 0
13075 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13076
13077 #undef PMDS
13078 #undef NEXT_PAGE
13079
13080 - .data
13081 + .align PAGE_SIZE
13082 +ENTRY(cpu_gdt_table)
13083 + .rept NR_CPUS
13084 + .quad 0x0000000000000000 /* NULL descriptor */
13085 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13086 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13087 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13088 + .quad 0x00cffb000000ffff /* __USER32_CS */
13089 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13090 + .quad 0x00affb000000ffff /* __USER_CS */
13091 +
13092 +#ifdef CONFIG_PAX_KERNEXEC
13093 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13094 +#else
13095 + .quad 0x0 /* unused */
13096 +#endif
13097 +
13098 + .quad 0,0 /* TSS */
13099 + .quad 0,0 /* LDT */
13100 + .quad 0,0,0 /* three TLS descriptors */
13101 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13102 + /* asm/segment.h:GDT_ENTRIES must match this */
13103 +
13104 + /* zero the remaining page */
13105 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13106 + .endr
13107 +
13108 .align 16
13109 .globl early_gdt_descr
13110 early_gdt_descr:
13111 .word GDT_ENTRIES*8-1
13112 early_gdt_descr_base:
13113 - .quad INIT_PER_CPU_VAR(gdt_page)
13114 + .quad cpu_gdt_table
13115
13116 ENTRY(phys_base)
13117 /* This must match the first entry in level2_kernel_pgt */
13118 .quad 0x0000000000000000
13119
13120 #include "../../x86/xen/xen-head.S"
13121 -
13122 - .section .bss, "aw", @nobits
13123 +
13124 + .section .rodata,"a",@progbits
13125 .align L1_CACHE_BYTES
13126 ENTRY(idt_table)
13127 - .skip IDT_ENTRIES * 16
13128 + .fill 512,8,0
13129
13130 __PAGE_ALIGNED_BSS
13131 .align PAGE_SIZE
13132 diff -urNp linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c
13133 --- linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400
13134 +++ linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-05 19:44:33.000000000 -0400
13135 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13136 EXPORT_SYMBOL(cmpxchg8b_emu);
13137 #endif
13138
13139 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13140 +
13141 /* Networking helper routines. */
13142 EXPORT_SYMBOL(csum_partial_copy_generic);
13143 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13144 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13145
13146 EXPORT_SYMBOL(__get_user_1);
13147 EXPORT_SYMBOL(__get_user_2);
13148 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13149
13150 EXPORT_SYMBOL(csum_partial);
13151 EXPORT_SYMBOL(empty_zero_page);
13152 +
13153 +#ifdef CONFIG_PAX_KERNEXEC
13154 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13155 +#endif
13156 diff -urNp linux-2.6.39.4/arch/x86/kernel/i8259.c linux-2.6.39.4/arch/x86/kernel/i8259.c
13157 --- linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400
13158 +++ linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-08-05 19:44:33.000000000 -0400
13159 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13160 "spurious 8259A interrupt: IRQ%d.\n", irq);
13161 spurious_irq_mask |= irqmask;
13162 }
13163 - atomic_inc(&irq_err_count);
13164 + atomic_inc_unchecked(&irq_err_count);
13165 /*
13166 * Theoretically we do not have to handle this IRQ,
13167 * but in Linux this does not cause problems and is
13168 diff -urNp linux-2.6.39.4/arch/x86/kernel/init_task.c linux-2.6.39.4/arch/x86/kernel/init_task.c
13169 --- linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400
13170 +++ linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-08-05 19:44:33.000000000 -0400
13171 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13172 * way process stacks are handled. This is done by having a special
13173 * "init_task" linker map entry..
13174 */
13175 -union thread_union init_thread_union __init_task_data =
13176 - { INIT_THREAD_INFO(init_task) };
13177 +union thread_union init_thread_union __init_task_data;
13178
13179 /*
13180 * Initial task structure.
13181 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13182 * section. Since TSS's are completely CPU-local, we want them
13183 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13184 */
13185 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13186 -
13187 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13188 +EXPORT_SYMBOL(init_tss);
13189 diff -urNp linux-2.6.39.4/arch/x86/kernel/ioport.c linux-2.6.39.4/arch/x86/kernel/ioport.c
13190 --- linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
13191 +++ linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-08-05 19:44:33.000000000 -0400
13192 @@ -6,6 +6,7 @@
13193 #include <linux/sched.h>
13194 #include <linux/kernel.h>
13195 #include <linux/capability.h>
13196 +#include <linux/security.h>
13197 #include <linux/errno.h>
13198 #include <linux/types.h>
13199 #include <linux/ioport.h>
13200 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13201
13202 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13203 return -EINVAL;
13204 +#ifdef CONFIG_GRKERNSEC_IO
13205 + if (turn_on && grsec_disable_privio) {
13206 + gr_handle_ioperm();
13207 + return -EPERM;
13208 + }
13209 +#endif
13210 if (turn_on && !capable(CAP_SYS_RAWIO))
13211 return -EPERM;
13212
13213 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13214 * because the ->io_bitmap_max value must match the bitmap
13215 * contents:
13216 */
13217 - tss = &per_cpu(init_tss, get_cpu());
13218 + tss = init_tss + get_cpu();
13219
13220 if (turn_on)
13221 bitmap_clear(t->io_bitmap_ptr, from, num);
13222 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13223 return -EINVAL;
13224 /* Trying to gain more privileges? */
13225 if (level > old) {
13226 +#ifdef CONFIG_GRKERNSEC_IO
13227 + if (grsec_disable_privio) {
13228 + gr_handle_iopl();
13229 + return -EPERM;
13230 + }
13231 +#endif
13232 if (!capable(CAP_SYS_RAWIO))
13233 return -EPERM;
13234 }
13235 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq_32.c linux-2.6.39.4/arch/x86/kernel/irq_32.c
13236 --- linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400
13237 +++ linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-08-05 19:44:33.000000000 -0400
13238 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13239 __asm__ __volatile__("andl %%esp,%0" :
13240 "=r" (sp) : "0" (THREAD_SIZE - 1));
13241
13242 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13243 + return sp < STACK_WARN;
13244 }
13245
13246 static void print_stack_overflow(void)
13247 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13248 * per-CPU IRQ handling contexts (thread information and stack)
13249 */
13250 union irq_ctx {
13251 - struct thread_info tinfo;
13252 - u32 stack[THREAD_SIZE/sizeof(u32)];
13253 + unsigned long previous_esp;
13254 + u32 stack[THREAD_SIZE/sizeof(u32)];
13255 } __attribute__((aligned(THREAD_SIZE)));
13256
13257 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13258 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13259 static inline int
13260 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13261 {
13262 - union irq_ctx *curctx, *irqctx;
13263 + union irq_ctx *irqctx;
13264 u32 *isp, arg1, arg2;
13265
13266 - curctx = (union irq_ctx *) current_thread_info();
13267 irqctx = __this_cpu_read(hardirq_ctx);
13268
13269 /*
13270 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13271 * handler) we can't do that and just have to keep using the
13272 * current stack (which is the irq stack already after all)
13273 */
13274 - if (unlikely(curctx == irqctx))
13275 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13276 return 0;
13277
13278 /* build the stack frame on the IRQ stack */
13279 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13280 - irqctx->tinfo.task = curctx->tinfo.task;
13281 - irqctx->tinfo.previous_esp = current_stack_pointer;
13282 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13283 + irqctx->previous_esp = current_stack_pointer;
13284
13285 - /*
13286 - * Copy the softirq bits in preempt_count so that the
13287 - * softirq checks work in the hardirq context.
13288 - */
13289 - irqctx->tinfo.preempt_count =
13290 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13291 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13293 + __set_fs(MAKE_MM_SEG(0));
13294 +#endif
13295
13296 if (unlikely(overflow))
13297 call_on_stack(print_stack_overflow, isp);
13298 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13299 : "0" (irq), "1" (desc), "2" (isp),
13300 "D" (desc->handle_irq)
13301 : "memory", "cc", "ecx");
13302 +
13303 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13304 + __set_fs(current_thread_info()->addr_limit);
13305 +#endif
13306 +
13307 return 1;
13308 }
13309
13310 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13311 */
13312 void __cpuinit irq_ctx_init(int cpu)
13313 {
13314 - union irq_ctx *irqctx;
13315 -
13316 if (per_cpu(hardirq_ctx, cpu))
13317 return;
13318
13319 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13320 - THREAD_FLAGS,
13321 - THREAD_ORDER));
13322 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13323 - irqctx->tinfo.cpu = cpu;
13324 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13325 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13326 -
13327 - per_cpu(hardirq_ctx, cpu) = irqctx;
13328 -
13329 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13330 - THREAD_FLAGS,
13331 - THREAD_ORDER));
13332 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13333 - irqctx->tinfo.cpu = cpu;
13334 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13335 -
13336 - per_cpu(softirq_ctx, cpu) = irqctx;
13337 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13338 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13339
13340 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13341 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13342 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13343 asmlinkage void do_softirq(void)
13344 {
13345 unsigned long flags;
13346 - struct thread_info *curctx;
13347 union irq_ctx *irqctx;
13348 u32 *isp;
13349
13350 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13351 local_irq_save(flags);
13352
13353 if (local_softirq_pending()) {
13354 - curctx = current_thread_info();
13355 irqctx = __this_cpu_read(softirq_ctx);
13356 - irqctx->tinfo.task = curctx->task;
13357 - irqctx->tinfo.previous_esp = current_stack_pointer;
13358 + irqctx->previous_esp = current_stack_pointer;
13359
13360 /* build the stack frame on the softirq stack */
13361 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13362 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13363 +
13364 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13365 + __set_fs(MAKE_MM_SEG(0));
13366 +#endif
13367
13368 call_on_stack(__do_softirq, isp);
13369 +
13370 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13371 + __set_fs(current_thread_info()->addr_limit);
13372 +#endif
13373 +
13374 /*
13375 * Shouldn't happen, we returned above if in_interrupt():
13376 */
13377 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq.c linux-2.6.39.4/arch/x86/kernel/irq.c
13378 --- linux-2.6.39.4/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400
13379 +++ linux-2.6.39.4/arch/x86/kernel/irq.c 2011-08-05 19:44:33.000000000 -0400
13380 @@ -17,7 +17,7 @@
13381 #include <asm/mce.h>
13382 #include <asm/hw_irq.h>
13383
13384 -atomic_t irq_err_count;
13385 +atomic_unchecked_t irq_err_count;
13386
13387 /* Function pointer for generic interrupt vector handling */
13388 void (*x86_platform_ipi_callback)(void) = NULL;
13389 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13390 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13391 seq_printf(p, " Machine check polls\n");
13392 #endif
13393 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13394 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13395 #if defined(CONFIG_X86_IO_APIC)
13396 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13397 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13398 #endif
13399 return 0;
13400 }
13401 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13402
13403 u64 arch_irq_stat(void)
13404 {
13405 - u64 sum = atomic_read(&irq_err_count);
13406 + u64 sum = atomic_read_unchecked(&irq_err_count);
13407
13408 #ifdef CONFIG_X86_IO_APIC
13409 - sum += atomic_read(&irq_mis_count);
13410 + sum += atomic_read_unchecked(&irq_mis_count);
13411 #endif
13412 return sum;
13413 }
13414 diff -urNp linux-2.6.39.4/arch/x86/kernel/kgdb.c linux-2.6.39.4/arch/x86/kernel/kgdb.c
13415 --- linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
13416 +++ linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-08-05 20:34:06.000000000 -0400
13417 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13418 #ifdef CONFIG_X86_32
13419 switch (regno) {
13420 case GDB_SS:
13421 - if (!user_mode_vm(regs))
13422 + if (!user_mode(regs))
13423 *(unsigned long *)mem = __KERNEL_DS;
13424 break;
13425 case GDB_SP:
13426 - if (!user_mode_vm(regs))
13427 + if (!user_mode(regs))
13428 *(unsigned long *)mem = kernel_stack_pointer(regs);
13429 break;
13430 case GDB_GS:
13431 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13432 case 'k':
13433 /* clear the trace bit */
13434 linux_regs->flags &= ~X86_EFLAGS_TF;
13435 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13436 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13437
13438 /* set the trace bit if we're stepping */
13439 if (remcomInBuffer[0] == 's') {
13440 linux_regs->flags |= X86_EFLAGS_TF;
13441 - atomic_set(&kgdb_cpu_doing_single_step,
13442 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13443 raw_smp_processor_id());
13444 }
13445
13446 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13447 return NOTIFY_DONE;
13448
13449 case DIE_DEBUG:
13450 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13451 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13452 if (user_mode(regs))
13453 return single_step_cont(regs, args);
13454 break;
13455 diff -urNp linux-2.6.39.4/arch/x86/kernel/kprobes.c linux-2.6.39.4/arch/x86/kernel/kprobes.c
13456 --- linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
13457 +++ linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-08-05 19:44:33.000000000 -0400
13458 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13459 } __attribute__((packed)) *insn;
13460
13461 insn = (struct __arch_relative_insn *)from;
13462 +
13463 + pax_open_kernel();
13464 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13465 insn->op = op;
13466 + pax_close_kernel();
13467 }
13468
13469 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13470 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13471 kprobe_opcode_t opcode;
13472 kprobe_opcode_t *orig_opcodes = opcodes;
13473
13474 - if (search_exception_tables((unsigned long)opcodes))
13475 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13476 return 0; /* Page fault may occur on this address. */
13477
13478 retry:
13479 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13480 }
13481 }
13482 insn_get_length(&insn);
13483 + pax_open_kernel();
13484 memcpy(dest, insn.kaddr, insn.length);
13485 + pax_close_kernel();
13486
13487 #ifdef CONFIG_X86_64
13488 if (insn_rip_relative(&insn)) {
13489 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13490 (u8 *) dest;
13491 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13492 disp = (u8 *) dest + insn_offset_displacement(&insn);
13493 + pax_open_kernel();
13494 *(s32 *) disp = (s32) newdisp;
13495 + pax_close_kernel();
13496 }
13497 #endif
13498 return insn.length;
13499 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13500 */
13501 __copy_instruction(p->ainsn.insn, p->addr, 0);
13502
13503 - if (can_boost(p->addr))
13504 + if (can_boost(ktla_ktva(p->addr)))
13505 p->ainsn.boostable = 0;
13506 else
13507 p->ainsn.boostable = -1;
13508
13509 - p->opcode = *p->addr;
13510 + p->opcode = *(ktla_ktva(p->addr));
13511 }
13512
13513 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13514 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13515 * nor set current_kprobe, because it doesn't use single
13516 * stepping.
13517 */
13518 - regs->ip = (unsigned long)p->ainsn.insn;
13519 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13520 preempt_enable_no_resched();
13521 return;
13522 }
13523 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13524 if (p->opcode == BREAKPOINT_INSTRUCTION)
13525 regs->ip = (unsigned long)p->addr;
13526 else
13527 - regs->ip = (unsigned long)p->ainsn.insn;
13528 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13529 }
13530
13531 /*
13532 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13533 setup_singlestep(p, regs, kcb, 0);
13534 return 1;
13535 }
13536 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13537 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13538 /*
13539 * The breakpoint instruction was removed right
13540 * after we hit it. Another cpu has removed
13541 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13542 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13543 {
13544 unsigned long *tos = stack_addr(regs);
13545 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13546 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13547 unsigned long orig_ip = (unsigned long)p->addr;
13548 kprobe_opcode_t *insn = p->ainsn.insn;
13549
13550 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13551 struct die_args *args = data;
13552 int ret = NOTIFY_DONE;
13553
13554 - if (args->regs && user_mode_vm(args->regs))
13555 + if (args->regs && user_mode(args->regs))
13556 return ret;
13557
13558 switch (val) {
13559 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13560 * Verify if the address gap is in 2GB range, because this uses
13561 * a relative jump.
13562 */
13563 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13564 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13565 if (abs(rel) > 0x7fffffff)
13566 return -ERANGE;
13567
13568 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13569 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13570
13571 /* Set probe function call */
13572 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13573 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13574
13575 /* Set returning jmp instruction at the tail of out-of-line buffer */
13576 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13577 - (u8 *)op->kp.addr + op->optinsn.size);
13578 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13579
13580 flush_icache_range((unsigned long) buf,
13581 (unsigned long) buf + TMPL_END_IDX +
13582 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13583 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13584
13585 /* Backup instructions which will be replaced by jump address */
13586 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13587 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13588 RELATIVE_ADDR_SIZE);
13589
13590 insn_buf[0] = RELATIVEJUMP_OPCODE;
13591 diff -urNp linux-2.6.39.4/arch/x86/kernel/ldt.c linux-2.6.39.4/arch/x86/kernel/ldt.c
13592 --- linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400
13593 +++ linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-08-05 19:44:33.000000000 -0400
13594 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13595 if (reload) {
13596 #ifdef CONFIG_SMP
13597 preempt_disable();
13598 - load_LDT(pc);
13599 + load_LDT_nolock(pc);
13600 if (!cpumask_equal(mm_cpumask(current->mm),
13601 cpumask_of(smp_processor_id())))
13602 smp_call_function(flush_ldt, current->mm, 1);
13603 preempt_enable();
13604 #else
13605 - load_LDT(pc);
13606 + load_LDT_nolock(pc);
13607 #endif
13608 }
13609 if (oldsize) {
13610 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13611 return err;
13612
13613 for (i = 0; i < old->size; i++)
13614 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13615 + write_ldt_entry(new->ldt, i, old->ldt + i);
13616 return 0;
13617 }
13618
13619 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13620 retval = copy_ldt(&mm->context, &old_mm->context);
13621 mutex_unlock(&old_mm->context.lock);
13622 }
13623 +
13624 + if (tsk == current) {
13625 + mm->context.vdso = 0;
13626 +
13627 +#ifdef CONFIG_X86_32
13628 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13629 + mm->context.user_cs_base = 0UL;
13630 + mm->context.user_cs_limit = ~0UL;
13631 +
13632 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13633 + cpus_clear(mm->context.cpu_user_cs_mask);
13634 +#endif
13635 +
13636 +#endif
13637 +#endif
13638 +
13639 + }
13640 +
13641 return retval;
13642 }
13643
13644 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13645 }
13646 }
13647
13648 +#ifdef CONFIG_PAX_SEGMEXEC
13649 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13650 + error = -EINVAL;
13651 + goto out_unlock;
13652 + }
13653 +#endif
13654 +
13655 fill_ldt(&ldt, &ldt_info);
13656 if (oldmode)
13657 ldt.avl = 0;
13658 diff -urNp linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c
13659 --- linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400
13660 +++ linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-08-05 19:44:33.000000000 -0400
13661 @@ -27,7 +27,7 @@
13662 #include <asm/cacheflush.h>
13663 #include <asm/debugreg.h>
13664
13665 -static void set_idt(void *newidt, __u16 limit)
13666 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13667 {
13668 struct desc_ptr curidt;
13669
13670 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13671 }
13672
13673
13674 -static void set_gdt(void *newgdt, __u16 limit)
13675 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13676 {
13677 struct desc_ptr curgdt;
13678
13679 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13680 }
13681
13682 control_page = page_address(image->control_code_page);
13683 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13684 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13685
13686 relocate_kernel_ptr = control_page;
13687 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13688 diff -urNp linux-2.6.39.4/arch/x86/kernel/microcode_intel.c linux-2.6.39.4/arch/x86/kernel/microcode_intel.c
13689 --- linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400
13690 +++ linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-08-05 20:34:06.000000000 -0400
13691 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13692
13693 static int get_ucode_user(void *to, const void *from, size_t n)
13694 {
13695 - return copy_from_user(to, from, n);
13696 + return copy_from_user(to, (__force const void __user *)from, n);
13697 }
13698
13699 static enum ucode_state
13700 request_microcode_user(int cpu, const void __user *buf, size_t size)
13701 {
13702 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13703 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13704 }
13705
13706 static void microcode_fini_cpu(int cpu)
13707 diff -urNp linux-2.6.39.4/arch/x86/kernel/module.c linux-2.6.39.4/arch/x86/kernel/module.c
13708 --- linux-2.6.39.4/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
13709 +++ linux-2.6.39.4/arch/x86/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
13710 @@ -35,21 +35,66 @@
13711 #define DEBUGP(fmt...)
13712 #endif
13713
13714 -void *module_alloc(unsigned long size)
13715 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13716 {
13717 if (PAGE_ALIGN(size) > MODULES_LEN)
13718 return NULL;
13719 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13720 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13721 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13722 -1, __builtin_return_address(0));
13723 }
13724
13725 +void *module_alloc(unsigned long size)
13726 +{
13727 +
13728 +#ifdef CONFIG_PAX_KERNEXEC
13729 + return __module_alloc(size, PAGE_KERNEL);
13730 +#else
13731 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13732 +#endif
13733 +
13734 +}
13735 +
13736 /* Free memory returned from module_alloc */
13737 void module_free(struct module *mod, void *module_region)
13738 {
13739 vfree(module_region);
13740 }
13741
13742 +#ifdef CONFIG_PAX_KERNEXEC
13743 +#ifdef CONFIG_X86_32
13744 +void *module_alloc_exec(unsigned long size)
13745 +{
13746 + struct vm_struct *area;
13747 +
13748 + if (size == 0)
13749 + return NULL;
13750 +
13751 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13752 + return area ? area->addr : NULL;
13753 +}
13754 +EXPORT_SYMBOL(module_alloc_exec);
13755 +
13756 +void module_free_exec(struct module *mod, void *module_region)
13757 +{
13758 + vunmap(module_region);
13759 +}
13760 +EXPORT_SYMBOL(module_free_exec);
13761 +#else
13762 +void module_free_exec(struct module *mod, void *module_region)
13763 +{
13764 + module_free(mod, module_region);
13765 +}
13766 +EXPORT_SYMBOL(module_free_exec);
13767 +
13768 +void *module_alloc_exec(unsigned long size)
13769 +{
13770 + return __module_alloc(size, PAGE_KERNEL_RX);
13771 +}
13772 +EXPORT_SYMBOL(module_alloc_exec);
13773 +#endif
13774 +#endif
13775 +
13776 /* We don't need anything special. */
13777 int module_frob_arch_sections(Elf_Ehdr *hdr,
13778 Elf_Shdr *sechdrs,
13779 @@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13780 unsigned int i;
13781 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13782 Elf32_Sym *sym;
13783 - uint32_t *location;
13784 + uint32_t *plocation, location;
13785
13786 DEBUGP("Applying relocate section %u to %u\n", relsec,
13787 sechdrs[relsec].sh_info);
13788 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13789 /* This is where to make the change */
13790 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13791 - + rel[i].r_offset;
13792 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13793 + location = (uint32_t)plocation;
13794 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13795 + plocation = ktla_ktva((void *)plocation);
13796 /* This is the symbol it is referring to. Note that all
13797 undefined symbols have been resolved. */
13798 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13799 @@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13800 switch (ELF32_R_TYPE(rel[i].r_info)) {
13801 case R_386_32:
13802 /* We add the value into the location given */
13803 - *location += sym->st_value;
13804 + pax_open_kernel();
13805 + *plocation += sym->st_value;
13806 + pax_close_kernel();
13807 break;
13808 case R_386_PC32:
13809 /* Add the value, subtract its postition */
13810 - *location += sym->st_value - (uint32_t)location;
13811 + pax_open_kernel();
13812 + *plocation += sym->st_value - location;
13813 + pax_close_kernel();
13814 break;
13815 default:
13816 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13817 @@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13818 case R_X86_64_NONE:
13819 break;
13820 case R_X86_64_64:
13821 + pax_open_kernel();
13822 *(u64 *)loc = val;
13823 + pax_close_kernel();
13824 break;
13825 case R_X86_64_32:
13826 + pax_open_kernel();
13827 *(u32 *)loc = val;
13828 + pax_close_kernel();
13829 if (val != *(u32 *)loc)
13830 goto overflow;
13831 break;
13832 case R_X86_64_32S:
13833 + pax_open_kernel();
13834 *(s32 *)loc = val;
13835 + pax_close_kernel();
13836 if ((s64)val != *(s32 *)loc)
13837 goto overflow;
13838 break;
13839 case R_X86_64_PC32:
13840 val -= (u64)loc;
13841 + pax_open_kernel();
13842 *(u32 *)loc = val;
13843 + pax_close_kernel();
13844 +
13845 #if 0
13846 if ((s64)val != *(s32 *)loc)
13847 goto overflow;
13848 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt.c linux-2.6.39.4/arch/x86/kernel/paravirt.c
13849 --- linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400
13850 +++ linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-08-05 19:44:33.000000000 -0400
13851 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13852 {
13853 return x;
13854 }
13855 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13856 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13857 +#endif
13858
13859 void __init default_banner(void)
13860 {
13861 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13862 * corresponding structure. */
13863 static void *get_call_destination(u8 type)
13864 {
13865 - struct paravirt_patch_template tmpl = {
13866 + const struct paravirt_patch_template tmpl = {
13867 .pv_init_ops = pv_init_ops,
13868 .pv_time_ops = pv_time_ops,
13869 .pv_cpu_ops = pv_cpu_ops,
13870 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13871 .pv_lock_ops = pv_lock_ops,
13872 #endif
13873 };
13874 +
13875 + pax_track_stack();
13876 +
13877 return *((void **)&tmpl + type);
13878 }
13879
13880 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13881 if (opfunc == NULL)
13882 /* If there's no function, patch it with a ud2a (BUG) */
13883 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13884 - else if (opfunc == _paravirt_nop)
13885 + else if (opfunc == (void *)_paravirt_nop)
13886 /* If the operation is a nop, then nop the callsite */
13887 ret = paravirt_patch_nop();
13888
13889 /* identity functions just return their single argument */
13890 - else if (opfunc == _paravirt_ident_32)
13891 + else if (opfunc == (void *)_paravirt_ident_32)
13892 ret = paravirt_patch_ident_32(insnbuf, len);
13893 - else if (opfunc == _paravirt_ident_64)
13894 + else if (opfunc == (void *)_paravirt_ident_64)
13895 ret = paravirt_patch_ident_64(insnbuf, len);
13896 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13897 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13898 + ret = paravirt_patch_ident_64(insnbuf, len);
13899 +#endif
13900
13901 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13902 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13903 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13904 if (insn_len > len || start == NULL)
13905 insn_len = len;
13906 else
13907 - memcpy(insnbuf, start, insn_len);
13908 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13909
13910 return insn_len;
13911 }
13912 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13913 preempt_enable();
13914 }
13915
13916 -struct pv_info pv_info = {
13917 +struct pv_info pv_info __read_only = {
13918 .name = "bare hardware",
13919 .paravirt_enabled = 0,
13920 .kernel_rpl = 0,
13921 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13922 };
13923
13924 -struct pv_init_ops pv_init_ops = {
13925 +struct pv_init_ops pv_init_ops __read_only = {
13926 .patch = native_patch,
13927 };
13928
13929 -struct pv_time_ops pv_time_ops = {
13930 +struct pv_time_ops pv_time_ops __read_only = {
13931 .sched_clock = native_sched_clock,
13932 };
13933
13934 -struct pv_irq_ops pv_irq_ops = {
13935 +struct pv_irq_ops pv_irq_ops __read_only = {
13936 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13937 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13938 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13939 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13940 #endif
13941 };
13942
13943 -struct pv_cpu_ops pv_cpu_ops = {
13944 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13945 .cpuid = native_cpuid,
13946 .get_debugreg = native_get_debugreg,
13947 .set_debugreg = native_set_debugreg,
13948 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13949 .end_context_switch = paravirt_nop,
13950 };
13951
13952 -struct pv_apic_ops pv_apic_ops = {
13953 +struct pv_apic_ops pv_apic_ops __read_only = {
13954 #ifdef CONFIG_X86_LOCAL_APIC
13955 .startup_ipi_hook = paravirt_nop,
13956 #endif
13957 };
13958
13959 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13960 +#ifdef CONFIG_X86_32
13961 +#ifdef CONFIG_X86_PAE
13962 +/* 64-bit pagetable entries */
13963 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
13964 +#else
13965 /* 32-bit pagetable entries */
13966 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
13967 +#endif
13968 #else
13969 /* 64-bit pagetable entries */
13970 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13971 #endif
13972
13973 -struct pv_mmu_ops pv_mmu_ops = {
13974 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13975
13976 .read_cr2 = native_read_cr2,
13977 .write_cr2 = native_write_cr2,
13978 @@ -465,6 +480,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13979 },
13980
13981 .set_fixmap = native_set_fixmap,
13982 +
13983 +#ifdef CONFIG_PAX_KERNEXEC
13984 + .pax_open_kernel = native_pax_open_kernel,
13985 + .pax_close_kernel = native_pax_close_kernel,
13986 +#endif
13987 +
13988 };
13989
13990 EXPORT_SYMBOL_GPL(pv_time_ops);
13991 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c
13992 --- linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400
13993 +++ linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-05 19:44:33.000000000 -0400
13994 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13995 arch_spin_lock(lock);
13996 }
13997
13998 -struct pv_lock_ops pv_lock_ops = {
13999 +struct pv_lock_ops pv_lock_ops __read_only = {
14000 #ifdef CONFIG_SMP
14001 .spin_is_locked = __ticket_spin_is_locked,
14002 .spin_is_contended = __ticket_spin_is_contended,
14003 diff -urNp linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c
14004 --- linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400
14005 +++ linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-08-05 19:44:35.000000000 -0400
14006 @@ -2,7 +2,7 @@
14007 #include <asm/iommu_table.h>
14008 #include <linux/string.h>
14009 #include <linux/kallsyms.h>
14010 -
14011 +#include <linux/sched.h>
14012
14013 #define DEBUG 1
14014
14015 @@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i
14016 char sym_p[KSYM_SYMBOL_LEN];
14017 char sym_q[KSYM_SYMBOL_LEN];
14018
14019 + pax_track_stack();
14020 +
14021 /* Simple cyclic dependency checker. */
14022 for (p = start; p < finish; p++) {
14023 q = find_dependents_of(start, finish, p);
14024 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_32.c linux-2.6.39.4/arch/x86/kernel/process_32.c
14025 --- linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-06-25 12:55:22.000000000 -0400
14026 +++ linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-08-05 19:44:35.000000000 -0400
14027 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14028 unsigned long thread_saved_pc(struct task_struct *tsk)
14029 {
14030 return ((unsigned long *)tsk->thread.sp)[3];
14031 +//XXX return tsk->thread.eip;
14032 }
14033
14034 #ifndef CONFIG_SMP
14035 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14036 unsigned long sp;
14037 unsigned short ss, gs;
14038
14039 - if (user_mode_vm(regs)) {
14040 + if (user_mode(regs)) {
14041 sp = regs->sp;
14042 ss = regs->ss & 0xffff;
14043 - gs = get_user_gs(regs);
14044 } else {
14045 sp = kernel_stack_pointer(regs);
14046 savesegment(ss, ss);
14047 - savesegment(gs, gs);
14048 }
14049 + gs = get_user_gs(regs);
14050
14051 show_regs_common();
14052
14053 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14054 struct task_struct *tsk;
14055 int err;
14056
14057 - childregs = task_pt_regs(p);
14058 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14059 *childregs = *regs;
14060 childregs->ax = 0;
14061 childregs->sp = sp;
14062
14063 p->thread.sp = (unsigned long) childregs;
14064 p->thread.sp0 = (unsigned long) (childregs+1);
14065 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14066
14067 p->thread.ip = (unsigned long) ret_from_fork;
14068
14069 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14070 struct thread_struct *prev = &prev_p->thread,
14071 *next = &next_p->thread;
14072 int cpu = smp_processor_id();
14073 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14074 + struct tss_struct *tss = init_tss + cpu;
14075 bool preload_fpu;
14076
14077 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14078 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14079 */
14080 lazy_save_gs(prev->gs);
14081
14082 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14083 + __set_fs(task_thread_info(next_p)->addr_limit);
14084 +#endif
14085 +
14086 /*
14087 * Load the per-thread Thread-Local Storage descriptor.
14088 */
14089 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14090 */
14091 arch_end_context_switch(next_p);
14092
14093 + percpu_write(current_task, next_p);
14094 + percpu_write(current_tinfo, &next_p->tinfo);
14095 +
14096 if (preload_fpu)
14097 __math_state_restore();
14098
14099 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14100 if (prev->gs | next->gs)
14101 lazy_load_gs(next->gs);
14102
14103 - percpu_write(current_task, next_p);
14104 -
14105 return prev_p;
14106 }
14107
14108 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14109 } while (count++ < 16);
14110 return 0;
14111 }
14112 -
14113 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_64.c linux-2.6.39.4/arch/x86/kernel/process_64.c
14114 --- linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-06-25 12:55:22.000000000 -0400
14115 +++ linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-08-05 19:44:35.000000000 -0400
14116 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14117 void exit_idle(void)
14118 {
14119 /* idle loop has pid 0 */
14120 - if (current->pid)
14121 + if (task_pid_nr(current))
14122 return;
14123 __exit_idle();
14124 }
14125 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14126 struct pt_regs *childregs;
14127 struct task_struct *me = current;
14128
14129 - childregs = ((struct pt_regs *)
14130 - (THREAD_SIZE + task_stack_page(p))) - 1;
14131 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14132 *childregs = *regs;
14133
14134 childregs->ax = 0;
14135 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14136 p->thread.sp = (unsigned long) childregs;
14137 p->thread.sp0 = (unsigned long) (childregs+1);
14138 p->thread.usersp = me->thread.usersp;
14139 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14140
14141 set_tsk_thread_flag(p, TIF_FORK);
14142
14143 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14144 struct thread_struct *prev = &prev_p->thread;
14145 struct thread_struct *next = &next_p->thread;
14146 int cpu = smp_processor_id();
14147 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14148 + struct tss_struct *tss = init_tss + cpu;
14149 unsigned fsindex, gsindex;
14150 bool preload_fpu;
14151
14152 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14153 prev->usersp = percpu_read(old_rsp);
14154 percpu_write(old_rsp, next->usersp);
14155 percpu_write(current_task, next_p);
14156 + percpu_write(current_tinfo, &next_p->tinfo);
14157
14158 - percpu_write(kernel_stack,
14159 - (unsigned long)task_stack_page(next_p) +
14160 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14161 + percpu_write(kernel_stack, next->sp0);
14162
14163 /*
14164 * Now maybe reload the debug registers and handle I/O bitmaps
14165 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14166 if (!p || p == current || p->state == TASK_RUNNING)
14167 return 0;
14168 stack = (unsigned long)task_stack_page(p);
14169 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14170 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14171 return 0;
14172 fp = *(u64 *)(p->thread.sp);
14173 do {
14174 - if (fp < (unsigned long)stack ||
14175 - fp >= (unsigned long)stack+THREAD_SIZE)
14176 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14177 return 0;
14178 ip = *(u64 *)(fp+8);
14179 if (!in_sched_functions(ip))
14180 diff -urNp linux-2.6.39.4/arch/x86/kernel/process.c linux-2.6.39.4/arch/x86/kernel/process.c
14181 --- linux-2.6.39.4/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
14182 +++ linux-2.6.39.4/arch/x86/kernel/process.c 2011-08-05 19:44:35.000000000 -0400
14183 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14184
14185 void free_thread_info(struct thread_info *ti)
14186 {
14187 - free_thread_xstate(ti->task);
14188 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14189 }
14190
14191 +static struct kmem_cache *task_struct_cachep;
14192 +
14193 void arch_task_cache_init(void)
14194 {
14195 - task_xstate_cachep =
14196 - kmem_cache_create("task_xstate", xstate_size,
14197 + /* create a slab on which task_structs can be allocated */
14198 + task_struct_cachep =
14199 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14200 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14201 +
14202 + task_xstate_cachep =
14203 + kmem_cache_create("task_xstate", xstate_size,
14204 __alignof__(union thread_xstate),
14205 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14206 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14207 +}
14208 +
14209 +struct task_struct *alloc_task_struct_node(int node)
14210 +{
14211 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14212 +}
14213 +
14214 +void free_task_struct(struct task_struct *task)
14215 +{
14216 + free_thread_xstate(task);
14217 + kmem_cache_free(task_struct_cachep, task);
14218 }
14219
14220 /*
14221 @@ -70,7 +87,7 @@ void exit_thread(void)
14222 unsigned long *bp = t->io_bitmap_ptr;
14223
14224 if (bp) {
14225 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14226 + struct tss_struct *tss = init_tss + get_cpu();
14227
14228 t->io_bitmap_ptr = NULL;
14229 clear_thread_flag(TIF_IO_BITMAP);
14230 @@ -106,7 +123,7 @@ void show_regs_common(void)
14231
14232 printk(KERN_CONT "\n");
14233 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14234 - current->pid, current->comm, print_tainted(),
14235 + task_pid_nr(current), current->comm, print_tainted(),
14236 init_utsname()->release,
14237 (int)strcspn(init_utsname()->version, " "),
14238 init_utsname()->version);
14239 @@ -120,6 +137,9 @@ void flush_thread(void)
14240 {
14241 struct task_struct *tsk = current;
14242
14243 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14244 + loadsegment(gs, 0);
14245 +#endif
14246 flush_ptrace_hw_breakpoint(tsk);
14247 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14248 /*
14249 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14250 regs.di = (unsigned long) arg;
14251
14252 #ifdef CONFIG_X86_32
14253 - regs.ds = __USER_DS;
14254 - regs.es = __USER_DS;
14255 + regs.ds = __KERNEL_DS;
14256 + regs.es = __KERNEL_DS;
14257 regs.fs = __KERNEL_PERCPU;
14258 - regs.gs = __KERNEL_STACK_CANARY;
14259 + savesegment(gs, regs.gs);
14260 #else
14261 regs.ss = __KERNEL_DS;
14262 #endif
14263 @@ -401,7 +421,7 @@ void default_idle(void)
14264 EXPORT_SYMBOL(default_idle);
14265 #endif
14266
14267 -void stop_this_cpu(void *dummy)
14268 +__noreturn void stop_this_cpu(void *dummy)
14269 {
14270 local_irq_disable();
14271 /*
14272 @@ -665,16 +685,34 @@ static int __init idle_setup(char *str)
14273 }
14274 early_param("idle", idle_setup);
14275
14276 -unsigned long arch_align_stack(unsigned long sp)
14277 +#ifdef CONFIG_PAX_RANDKSTACK
14278 +asmlinkage void pax_randomize_kstack(void)
14279 {
14280 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14281 - sp -= get_random_int() % 8192;
14282 - return sp & ~0xf;
14283 -}
14284 + struct thread_struct *thread = &current->thread;
14285 + unsigned long time;
14286
14287 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14288 -{
14289 - unsigned long range_end = mm->brk + 0x02000000;
14290 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14291 -}
14292 + if (!randomize_va_space)
14293 + return;
14294 +
14295 + rdtscl(time);
14296 +
14297 + /* P4 seems to return a 0 LSB, ignore it */
14298 +#ifdef CONFIG_MPENTIUM4
14299 + time &= 0x3EUL;
14300 + time <<= 2;
14301 +#elif defined(CONFIG_X86_64)
14302 + time &= 0xFUL;
14303 + time <<= 4;
14304 +#else
14305 + time &= 0x1FUL;
14306 + time <<= 3;
14307 +#endif
14308 +
14309 + thread->sp0 ^= time;
14310 + load_sp0(init_tss + smp_processor_id(), thread);
14311
14312 +#ifdef CONFIG_X86_64
14313 + percpu_write(kernel_stack, thread->sp0);
14314 +#endif
14315 +}
14316 +#endif
14317 diff -urNp linux-2.6.39.4/arch/x86/kernel/ptrace.c linux-2.6.39.4/arch/x86/kernel/ptrace.c
14318 --- linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
14319 +++ linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-08-05 19:44:35.000000000 -0400
14320 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14321 unsigned long addr, unsigned long data)
14322 {
14323 int ret;
14324 - unsigned long __user *datap = (unsigned long __user *)data;
14325 + unsigned long __user *datap = (__force unsigned long __user *)data;
14326
14327 switch (request) {
14328 /* read the word at location addr in the USER area. */
14329 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14330 if ((int) addr < 0)
14331 return -EIO;
14332 ret = do_get_thread_area(child, addr,
14333 - (struct user_desc __user *)data);
14334 + (__force struct user_desc __user *) data);
14335 break;
14336
14337 case PTRACE_SET_THREAD_AREA:
14338 if ((int) addr < 0)
14339 return -EIO;
14340 ret = do_set_thread_area(child, addr,
14341 - (struct user_desc __user *)data, 0);
14342 + (__force struct user_desc __user *) data, 0);
14343 break;
14344 #endif
14345
14346 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14347 memset(info, 0, sizeof(*info));
14348 info->si_signo = SIGTRAP;
14349 info->si_code = si_code;
14350 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14351 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14352 }
14353
14354 void user_single_step_siginfo(struct task_struct *tsk,
14355 @@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts
14356 * We must return the syscall number to actually look up in the table.
14357 * This can be -1L to skip running any syscall at all.
14358 */
14359 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
14360 +long syscall_trace_enter(struct pt_regs *regs)
14361 {
14362 long ret = 0;
14363
14364 @@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru
14365 return ret ?: regs->orig_ax;
14366 }
14367
14368 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
14369 +void syscall_trace_leave(struct pt_regs *regs)
14370 {
14371 bool step;
14372
14373 diff -urNp linux-2.6.39.4/arch/x86/kernel/pvclock.c linux-2.6.39.4/arch/x86/kernel/pvclock.c
14374 --- linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400
14375 +++ linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-08-05 19:44:35.000000000 -0400
14376 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14377 return pv_tsc_khz;
14378 }
14379
14380 -static atomic64_t last_value = ATOMIC64_INIT(0);
14381 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14382
14383 void pvclock_resume(void)
14384 {
14385 - atomic64_set(&last_value, 0);
14386 + atomic64_set_unchecked(&last_value, 0);
14387 }
14388
14389 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14390 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14391 * updating at the same time, and one of them could be slightly behind,
14392 * making the assumption that last_value always go forward fail to hold.
14393 */
14394 - last = atomic64_read(&last_value);
14395 + last = atomic64_read_unchecked(&last_value);
14396 do {
14397 if (ret < last)
14398 return last;
14399 - last = atomic64_cmpxchg(&last_value, last, ret);
14400 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14401 } while (unlikely(last != ret));
14402
14403 return ret;
14404 diff -urNp linux-2.6.39.4/arch/x86/kernel/reboot.c linux-2.6.39.4/arch/x86/kernel/reboot.c
14405 --- linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:11:51.000000000 -0400
14406 +++ linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:12:20.000000000 -0400
14407 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14408 EXPORT_SYMBOL(pm_power_off);
14409
14410 static const struct desc_ptr no_idt = {};
14411 -static int reboot_mode;
14412 +static unsigned short reboot_mode;
14413 enum reboot_type reboot_type = BOOT_KBD;
14414 int reboot_force;
14415
14416 @@ -307,13 +307,17 @@ core_initcall(reboot_init);
14417 extern const unsigned char machine_real_restart_asm[];
14418 extern const u64 machine_real_restart_gdt[3];
14419
14420 -void machine_real_restart(unsigned int type)
14421 +__noreturn void machine_real_restart(unsigned int type)
14422 {
14423 void *restart_va;
14424 unsigned long restart_pa;
14425 - void (*restart_lowmem)(unsigned int);
14426 + void (* __noreturn restart_lowmem)(unsigned int);
14427 u64 *lowmem_gdt;
14428
14429 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14430 + struct desc_struct *gdt;
14431 +#endif
14432 +
14433 local_irq_disable();
14434
14435 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14436 @@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t
14437 boot)". This seems like a fairly standard thing that gets set by
14438 REBOOT.COM programs, and the previous reset routine did this
14439 too. */
14440 - *((unsigned short *)0x472) = reboot_mode;
14441 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14442
14443 /* Patch the GDT in the low memory trampoline */
14444 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14445
14446 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14447 restart_pa = virt_to_phys(restart_va);
14448 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14449 + restart_lowmem = (void *)restart_pa;
14450
14451 /* GDT[0]: GDT self-pointer */
14452 lowmem_gdt[0] =
14453 @@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t
14454 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14455
14456 /* Jump to the identity-mapped low memory code */
14457 +
14458 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14459 + gdt = get_cpu_gdt_table(smp_processor_id());
14460 + pax_open_kernel();
14461 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14462 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14463 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14464 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14465 +#endif
14466 +#ifdef CONFIG_PAX_KERNEXEC
14467 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14468 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14469 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14470 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14471 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14472 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14473 +#endif
14474 + pax_close_kernel();
14475 +#endif
14476 +
14477 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14478 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14479 + unreachable();
14480 +#else
14481 restart_lowmem(type);
14482 +#endif
14483 +
14484 }
14485 #ifdef CONFIG_APM_MODULE
14486 EXPORT_SYMBOL(machine_real_restart);
14487 @@ -486,7 +516,7 @@ void __attribute__((weak)) mach_reboot_f
14488 {
14489 }
14490
14491 -static void native_machine_emergency_restart(void)
14492 +__noreturn static void native_machine_emergency_restart(void)
14493 {
14494 int i;
14495
14496 @@ -601,13 +631,13 @@ void native_machine_shutdown(void)
14497 #endif
14498 }
14499
14500 -static void __machine_emergency_restart(int emergency)
14501 +static __noreturn void __machine_emergency_restart(int emergency)
14502 {
14503 reboot_emergency = emergency;
14504 machine_ops.emergency_restart();
14505 }
14506
14507 -static void native_machine_restart(char *__unused)
14508 +static __noreturn void native_machine_restart(char *__unused)
14509 {
14510 printk("machine restart\n");
14511
14512 @@ -616,7 +646,7 @@ static void native_machine_restart(char
14513 __machine_emergency_restart(0);
14514 }
14515
14516 -static void native_machine_halt(void)
14517 +static __noreturn void native_machine_halt(void)
14518 {
14519 /* stop other cpus and apics */
14520 machine_shutdown();
14521 @@ -627,7 +657,7 @@ static void native_machine_halt(void)
14522 stop_this_cpu(NULL);
14523 }
14524
14525 -static void native_machine_power_off(void)
14526 +__noreturn static void native_machine_power_off(void)
14527 {
14528 if (pm_power_off) {
14529 if (!reboot_force)
14530 @@ -636,6 +666,7 @@ static void native_machine_power_off(voi
14531 }
14532 /* a fallback in case there is no PM info available */
14533 tboot_shutdown(TB_SHUTDOWN_HALT);
14534 + unreachable();
14535 }
14536
14537 struct machine_ops machine_ops = {
14538 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup.c linux-2.6.39.4/arch/x86/kernel/setup.c
14539 --- linux-2.6.39.4/arch/x86/kernel/setup.c 2011-06-25 12:55:22.000000000 -0400
14540 +++ linux-2.6.39.4/arch/x86/kernel/setup.c 2011-08-05 19:44:35.000000000 -0400
14541 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14542 * area (640->1Mb) as ram even though it is not.
14543 * take them out.
14544 */
14545 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14546 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14547 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14548 }
14549
14550 @@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p)
14551
14552 if (!boot_params.hdr.root_flags)
14553 root_mountflags &= ~MS_RDONLY;
14554 - init_mm.start_code = (unsigned long) _text;
14555 - init_mm.end_code = (unsigned long) _etext;
14556 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14557 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14558 init_mm.end_data = (unsigned long) _edata;
14559 init_mm.brk = _brk_end;
14560
14561 - code_resource.start = virt_to_phys(_text);
14562 - code_resource.end = virt_to_phys(_etext)-1;
14563 - data_resource.start = virt_to_phys(_etext);
14564 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14565 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14566 + data_resource.start = virt_to_phys(_sdata);
14567 data_resource.end = virt_to_phys(_edata)-1;
14568 bss_resource.start = virt_to_phys(&__bss_start);
14569 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14570 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup_percpu.c linux-2.6.39.4/arch/x86/kernel/setup_percpu.c
14571 --- linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400
14572 +++ linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-08-05 19:44:35.000000000 -0400
14573 @@ -21,19 +21,17 @@
14574 #include <asm/cpu.h>
14575 #include <asm/stackprotector.h>
14576
14577 -DEFINE_PER_CPU(int, cpu_number);
14578 +#ifdef CONFIG_SMP
14579 +DEFINE_PER_CPU(unsigned int, cpu_number);
14580 EXPORT_PER_CPU_SYMBOL(cpu_number);
14581 +#endif
14582
14583 -#ifdef CONFIG_X86_64
14584 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14585 -#else
14586 -#define BOOT_PERCPU_OFFSET 0
14587 -#endif
14588
14589 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14590 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14591
14592 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14593 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14594 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14595 };
14596 EXPORT_SYMBOL(__per_cpu_offset);
14597 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14598 {
14599 #ifdef CONFIG_X86_32
14600 struct desc_struct gdt;
14601 + unsigned long base = per_cpu_offset(cpu);
14602
14603 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14604 - 0x2 | DESCTYPE_S, 0x8);
14605 - gdt.s = 1;
14606 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14607 + 0x83 | DESCTYPE_S, 0xC);
14608 write_gdt_entry(get_cpu_gdt_table(cpu),
14609 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14610 #endif
14611 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14612 /* alrighty, percpu areas up and running */
14613 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14614 for_each_possible_cpu(cpu) {
14615 +#ifdef CONFIG_CC_STACKPROTECTOR
14616 +#ifdef CONFIG_X86_32
14617 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14618 +#endif
14619 +#endif
14620 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14621 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14622 per_cpu(cpu_number, cpu) = cpu;
14623 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14624 */
14625 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14626 #endif
14627 +#ifdef CONFIG_CC_STACKPROTECTOR
14628 +#ifdef CONFIG_X86_32
14629 + if (!cpu)
14630 + per_cpu(stack_canary.canary, cpu) = canary;
14631 +#endif
14632 +#endif
14633 /*
14634 * Up to this point, the boot CPU has been using .init.data
14635 * area. Reload any changed state for the boot CPU.
14636 diff -urNp linux-2.6.39.4/arch/x86/kernel/signal.c linux-2.6.39.4/arch/x86/kernel/signal.c
14637 --- linux-2.6.39.4/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
14638 +++ linux-2.6.39.4/arch/x86/kernel/signal.c 2011-08-05 19:44:35.000000000 -0400
14639 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14640 * Align the stack pointer according to the i386 ABI,
14641 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14642 */
14643 - sp = ((sp + 4) & -16ul) - 4;
14644 + sp = ((sp - 12) & -16ul) - 4;
14645 #else /* !CONFIG_X86_32 */
14646 sp = round_down(sp, 16) - 8;
14647 #endif
14648 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14649 * Return an always-bogus address instead so we will die with SIGSEGV.
14650 */
14651 if (onsigstack && !likely(on_sig_stack(sp)))
14652 - return (void __user *)-1L;
14653 + return (__force void __user *)-1L;
14654
14655 /* save i387 state */
14656 if (used_math() && save_i387_xstate(*fpstate) < 0)
14657 - return (void __user *)-1L;
14658 + return (__force void __user *)-1L;
14659
14660 return (void __user *)sp;
14661 }
14662 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14663 }
14664
14665 if (current->mm->context.vdso)
14666 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14667 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14668 else
14669 - restorer = &frame->retcode;
14670 + restorer = (void __user *)&frame->retcode;
14671 if (ka->sa.sa_flags & SA_RESTORER)
14672 restorer = ka->sa.sa_restorer;
14673
14674 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14675 * reasons and because gdb uses it as a signature to notice
14676 * signal handler stack frames.
14677 */
14678 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14679 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14680
14681 if (err)
14682 return -EFAULT;
14683 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14684 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14685
14686 /* Set up to return from userspace. */
14687 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14688 + if (current->mm->context.vdso)
14689 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14690 + else
14691 + restorer = (void __user *)&frame->retcode;
14692 if (ka->sa.sa_flags & SA_RESTORER)
14693 restorer = ka->sa.sa_restorer;
14694 put_user_ex(restorer, &frame->pretcode);
14695 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14696 * reasons and because gdb uses it as a signature to notice
14697 * signal handler stack frames.
14698 */
14699 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14700 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14701 } put_user_catch(err);
14702
14703 if (err)
14704 @@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re
14705 int signr;
14706 sigset_t *oldset;
14707
14708 + pax_track_stack();
14709 +
14710 /*
14711 * We want the common case to go fast, which is why we may in certain
14712 * cases get here from kernel mode. Just return without doing anything
14713 @@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re
14714 * X86_32: vm86 regs switched out by assembly code before reaching
14715 * here, so testing against kernel CS suffices.
14716 */
14717 - if (!user_mode(regs))
14718 + if (!user_mode_novm(regs))
14719 return;
14720
14721 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14722 diff -urNp linux-2.6.39.4/arch/x86/kernel/smpboot.c linux-2.6.39.4/arch/x86/kernel/smpboot.c
14723 --- linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-06-25 12:55:22.000000000 -0400
14724 +++ linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-08-05 19:44:35.000000000 -0400
14725 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14726 set_idle_for_cpu(cpu, c_idle.idle);
14727 do_rest:
14728 per_cpu(current_task, cpu) = c_idle.idle;
14729 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14730 #ifdef CONFIG_X86_32
14731 /* Stack for startup_32 can be just as for start_secondary onwards */
14732 irq_ctx_init(cpu);
14733 #else
14734 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14735 initial_gs = per_cpu_offset(cpu);
14736 - per_cpu(kernel_stack, cpu) =
14737 - (unsigned long)task_stack_page(c_idle.idle) -
14738 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14739 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14740 #endif
14741 +
14742 + pax_open_kernel();
14743 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14744 + pax_close_kernel();
14745 +
14746 initial_code = (unsigned long)start_secondary;
14747 stack_start = c_idle.idle->thread.sp;
14748
14749 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14750
14751 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14752
14753 +#ifdef CONFIG_PAX_PER_CPU_PGD
14754 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14755 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14756 + KERNEL_PGD_PTRS);
14757 +#endif
14758 +
14759 err = do_boot_cpu(apicid, cpu);
14760 if (err) {
14761 pr_debug("do_boot_cpu failed %d\n", err);
14762 diff -urNp linux-2.6.39.4/arch/x86/kernel/step.c linux-2.6.39.4/arch/x86/kernel/step.c
14763 --- linux-2.6.39.4/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400
14764 +++ linux-2.6.39.4/arch/x86/kernel/step.c 2011-08-05 19:44:35.000000000 -0400
14765 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14766 struct desc_struct *desc;
14767 unsigned long base;
14768
14769 - seg &= ~7UL;
14770 + seg >>= 3;
14771
14772 mutex_lock(&child->mm->context.lock);
14773 - if (unlikely((seg >> 3) >= child->mm->context.size))
14774 + if (unlikely(seg >= child->mm->context.size))
14775 addr = -1L; /* bogus selector, access would fault */
14776 else {
14777 desc = child->mm->context.ldt + seg;
14778 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14779 addr += base;
14780 }
14781 mutex_unlock(&child->mm->context.lock);
14782 - }
14783 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14784 + addr = ktla_ktva(addr);
14785
14786 return addr;
14787 }
14788 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14789 unsigned char opcode[15];
14790 unsigned long addr = convert_ip_to_linear(child, regs);
14791
14792 + if (addr == -EINVAL)
14793 + return 0;
14794 +
14795 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14796 for (i = 0; i < copied; i++) {
14797 switch (opcode[i]) {
14798 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14799
14800 #ifdef CONFIG_X86_64
14801 case 0x40 ... 0x4f:
14802 - if (regs->cs != __USER_CS)
14803 + if ((regs->cs & 0xffff) != __USER_CS)
14804 /* 32-bit mode: register increment */
14805 return 0;
14806 /* 64-bit mode: REX prefix */
14807 diff -urNp linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S
14808 --- linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400
14809 +++ linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-08-05 19:44:35.000000000 -0400
14810 @@ -1,3 +1,4 @@
14811 +.section .rodata,"a",@progbits
14812 ENTRY(sys_call_table)
14813 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14814 .long sys_exit
14815 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c
14816 --- linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400
14817 +++ linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-08-05 19:44:35.000000000 -0400
14818 @@ -24,17 +24,224 @@
14819
14820 #include <asm/syscalls.h>
14821
14822 -/*
14823 - * Do a system call from kernel instead of calling sys_execve so we
14824 - * end up with proper pt_regs.
14825 - */
14826 -int kernel_execve(const char *filename,
14827 - const char *const argv[],
14828 - const char *const envp[])
14829 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14830 {
14831 - long __res;
14832 - asm volatile ("int $0x80"
14833 - : "=a" (__res)
14834 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14835 - return __res;
14836 + unsigned long pax_task_size = TASK_SIZE;
14837 +
14838 +#ifdef CONFIG_PAX_SEGMEXEC
14839 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14840 + pax_task_size = SEGMEXEC_TASK_SIZE;
14841 +#endif
14842 +
14843 + if (len > pax_task_size || addr > pax_task_size - len)
14844 + return -EINVAL;
14845 +
14846 + return 0;
14847 +}
14848 +
14849 +unsigned long
14850 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14851 + unsigned long len, unsigned long pgoff, unsigned long flags)
14852 +{
14853 + struct mm_struct *mm = current->mm;
14854 + struct vm_area_struct *vma;
14855 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14856 +
14857 +#ifdef CONFIG_PAX_SEGMEXEC
14858 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14859 + pax_task_size = SEGMEXEC_TASK_SIZE;
14860 +#endif
14861 +
14862 + pax_task_size -= PAGE_SIZE;
14863 +
14864 + if (len > pax_task_size)
14865 + return -ENOMEM;
14866 +
14867 + if (flags & MAP_FIXED)
14868 + return addr;
14869 +
14870 +#ifdef CONFIG_PAX_RANDMMAP
14871 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14872 +#endif
14873 +
14874 + if (addr) {
14875 + addr = PAGE_ALIGN(addr);
14876 + if (pax_task_size - len >= addr) {
14877 + vma = find_vma(mm, addr);
14878 + if (check_heap_stack_gap(vma, addr, len))
14879 + return addr;
14880 + }
14881 + }
14882 + if (len > mm->cached_hole_size) {
14883 + start_addr = addr = mm->free_area_cache;
14884 + } else {
14885 + start_addr = addr = mm->mmap_base;
14886 + mm->cached_hole_size = 0;
14887 + }
14888 +
14889 +#ifdef CONFIG_PAX_PAGEEXEC
14890 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14891 + start_addr = 0x00110000UL;
14892 +
14893 +#ifdef CONFIG_PAX_RANDMMAP
14894 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14895 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14896 +#endif
14897 +
14898 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14899 + start_addr = addr = mm->mmap_base;
14900 + else
14901 + addr = start_addr;
14902 + }
14903 +#endif
14904 +
14905 +full_search:
14906 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14907 + /* At this point: (!vma || addr < vma->vm_end). */
14908 + if (pax_task_size - len < addr) {
14909 + /*
14910 + * Start a new search - just in case we missed
14911 + * some holes.
14912 + */
14913 + if (start_addr != mm->mmap_base) {
14914 + start_addr = addr = mm->mmap_base;
14915 + mm->cached_hole_size = 0;
14916 + goto full_search;
14917 + }
14918 + return -ENOMEM;
14919 + }
14920 + if (check_heap_stack_gap(vma, addr, len))
14921 + break;
14922 + if (addr + mm->cached_hole_size < vma->vm_start)
14923 + mm->cached_hole_size = vma->vm_start - addr;
14924 + addr = vma->vm_end;
14925 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14926 + start_addr = addr = mm->mmap_base;
14927 + mm->cached_hole_size = 0;
14928 + goto full_search;
14929 + }
14930 + }
14931 +
14932 + /*
14933 + * Remember the place where we stopped the search:
14934 + */
14935 + mm->free_area_cache = addr + len;
14936 + return addr;
14937 +}
14938 +
14939 +unsigned long
14940 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14941 + const unsigned long len, const unsigned long pgoff,
14942 + const unsigned long flags)
14943 +{
14944 + struct vm_area_struct *vma;
14945 + struct mm_struct *mm = current->mm;
14946 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14947 +
14948 +#ifdef CONFIG_PAX_SEGMEXEC
14949 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14950 + pax_task_size = SEGMEXEC_TASK_SIZE;
14951 +#endif
14952 +
14953 + pax_task_size -= PAGE_SIZE;
14954 +
14955 + /* requested length too big for entire address space */
14956 + if (len > pax_task_size)
14957 + return -ENOMEM;
14958 +
14959 + if (flags & MAP_FIXED)
14960 + return addr;
14961 +
14962 +#ifdef CONFIG_PAX_PAGEEXEC
14963 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
14964 + goto bottomup;
14965 +#endif
14966 +
14967 +#ifdef CONFIG_PAX_RANDMMAP
14968 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14969 +#endif
14970 +
14971 + /* requesting a specific address */
14972 + if (addr) {
14973 + addr = PAGE_ALIGN(addr);
14974 + if (pax_task_size - len >= addr) {
14975 + vma = find_vma(mm, addr);
14976 + if (check_heap_stack_gap(vma, addr, len))
14977 + return addr;
14978 + }
14979 + }
14980 +
14981 + /* check if free_area_cache is useful for us */
14982 + if (len <= mm->cached_hole_size) {
14983 + mm->cached_hole_size = 0;
14984 + mm->free_area_cache = mm->mmap_base;
14985 + }
14986 +
14987 + /* either no address requested or can't fit in requested address hole */
14988 + addr = mm->free_area_cache;
14989 +
14990 + /* make sure it can fit in the remaining address space */
14991 + if (addr > len) {
14992 + vma = find_vma(mm, addr-len);
14993 + if (check_heap_stack_gap(vma, addr - len, len))
14994 + /* remember the address as a hint for next time */
14995 + return (mm->free_area_cache = addr-len);
14996 + }
14997 +
14998 + if (mm->mmap_base < len)
14999 + goto bottomup;
15000 +
15001 + addr = mm->mmap_base-len;
15002 +
15003 + do {
15004 + /*
15005 + * Lookup failure means no vma is above this address,
15006 + * else if new region fits below vma->vm_start,
15007 + * return with success:
15008 + */
15009 + vma = find_vma(mm, addr);
15010 + if (check_heap_stack_gap(vma, addr, len))
15011 + /* remember the address as a hint for next time */
15012 + return (mm->free_area_cache = addr);
15013 +
15014 + /* remember the largest hole we saw so far */
15015 + if (addr + mm->cached_hole_size < vma->vm_start)
15016 + mm->cached_hole_size = vma->vm_start - addr;
15017 +
15018 + /* try just below the current vma->vm_start */
15019 + addr = skip_heap_stack_gap(vma, len);
15020 + } while (!IS_ERR_VALUE(addr));
15021 +
15022 +bottomup:
15023 + /*
15024 + * A failed mmap() very likely causes application failure,
15025 + * so fall back to the bottom-up function here. This scenario
15026 + * can happen with large stack limits and large mmap()
15027 + * allocations.
15028 + */
15029 +
15030 +#ifdef CONFIG_PAX_SEGMEXEC
15031 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15032 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15033 + else
15034 +#endif
15035 +
15036 + mm->mmap_base = TASK_UNMAPPED_BASE;
15037 +
15038 +#ifdef CONFIG_PAX_RANDMMAP
15039 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15040 + mm->mmap_base += mm->delta_mmap;
15041 +#endif
15042 +
15043 + mm->free_area_cache = mm->mmap_base;
15044 + mm->cached_hole_size = ~0UL;
15045 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15046 + /*
15047 + * Restore the topdown base:
15048 + */
15049 + mm->mmap_base = base;
15050 + mm->free_area_cache = base;
15051 + mm->cached_hole_size = ~0UL;
15052 +
15053 + return addr;
15054 }
15055 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c
15056 --- linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400
15057 +++ linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-08-05 19:44:35.000000000 -0400
15058 @@ -32,8 +32,8 @@ out:
15059 return error;
15060 }
15061
15062 -static void find_start_end(unsigned long flags, unsigned long *begin,
15063 - unsigned long *end)
15064 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15065 + unsigned long *begin, unsigned long *end)
15066 {
15067 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15068 unsigned long new_begin;
15069 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15070 *begin = new_begin;
15071 }
15072 } else {
15073 - *begin = TASK_UNMAPPED_BASE;
15074 + *begin = mm->mmap_base;
15075 *end = TASK_SIZE;
15076 }
15077 }
15078 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15079 if (flags & MAP_FIXED)
15080 return addr;
15081
15082 - find_start_end(flags, &begin, &end);
15083 + find_start_end(mm, flags, &begin, &end);
15084
15085 if (len > end)
15086 return -ENOMEM;
15087
15088 +#ifdef CONFIG_PAX_RANDMMAP
15089 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15090 +#endif
15091 +
15092 if (addr) {
15093 addr = PAGE_ALIGN(addr);
15094 vma = find_vma(mm, addr);
15095 - if (end - len >= addr &&
15096 - (!vma || addr + len <= vma->vm_start))
15097 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15098 return addr;
15099 }
15100 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15101 @@ -106,7 +109,7 @@ full_search:
15102 }
15103 return -ENOMEM;
15104 }
15105 - if (!vma || addr + len <= vma->vm_start) {
15106 + if (check_heap_stack_gap(vma, addr, len)) {
15107 /*
15108 * Remember the place where we stopped the search:
15109 */
15110 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15111 {
15112 struct vm_area_struct *vma;
15113 struct mm_struct *mm = current->mm;
15114 - unsigned long addr = addr0;
15115 + unsigned long base = mm->mmap_base, addr = addr0;
15116
15117 /* requested length too big for entire address space */
15118 if (len > TASK_SIZE)
15119 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15120 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15121 goto bottomup;
15122
15123 +#ifdef CONFIG_PAX_RANDMMAP
15124 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15125 +#endif
15126 +
15127 /* requesting a specific address */
15128 if (addr) {
15129 addr = PAGE_ALIGN(addr);
15130 - vma = find_vma(mm, addr);
15131 - if (TASK_SIZE - len >= addr &&
15132 - (!vma || addr + len <= vma->vm_start))
15133 - return addr;
15134 + if (TASK_SIZE - len >= addr) {
15135 + vma = find_vma(mm, addr);
15136 + if (check_heap_stack_gap(vma, addr, len))
15137 + return addr;
15138 + }
15139 }
15140
15141 /* check if free_area_cache is useful for us */
15142 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15143 /* make sure it can fit in the remaining address space */
15144 if (addr > len) {
15145 vma = find_vma(mm, addr-len);
15146 - if (!vma || addr <= vma->vm_start)
15147 + if (check_heap_stack_gap(vma, addr - len, len))
15148 /* remember the address as a hint for next time */
15149 return mm->free_area_cache = addr-len;
15150 }
15151 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15152 * return with success:
15153 */
15154 vma = find_vma(mm, addr);
15155 - if (!vma || addr+len <= vma->vm_start)
15156 + if (check_heap_stack_gap(vma, addr, len))
15157 /* remember the address as a hint for next time */
15158 return mm->free_area_cache = addr;
15159
15160 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15161 mm->cached_hole_size = vma->vm_start - addr;
15162
15163 /* try just below the current vma->vm_start */
15164 - addr = vma->vm_start-len;
15165 - } while (len < vma->vm_start);
15166 + addr = skip_heap_stack_gap(vma, len);
15167 + } while (!IS_ERR_VALUE(addr));
15168
15169 bottomup:
15170 /*
15171 @@ -198,13 +206,21 @@ bottomup:
15172 * can happen with large stack limits and large mmap()
15173 * allocations.
15174 */
15175 + mm->mmap_base = TASK_UNMAPPED_BASE;
15176 +
15177 +#ifdef CONFIG_PAX_RANDMMAP
15178 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15179 + mm->mmap_base += mm->delta_mmap;
15180 +#endif
15181 +
15182 + mm->free_area_cache = mm->mmap_base;
15183 mm->cached_hole_size = ~0UL;
15184 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15185 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15186 /*
15187 * Restore the topdown base:
15188 */
15189 - mm->free_area_cache = mm->mmap_base;
15190 + mm->mmap_base = base;
15191 + mm->free_area_cache = base;
15192 mm->cached_hole_size = ~0UL;
15193
15194 return addr;
15195 diff -urNp linux-2.6.39.4/arch/x86/kernel/tboot.c linux-2.6.39.4/arch/x86/kernel/tboot.c
15196 --- linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400
15197 +++ linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-08-05 19:44:35.000000000 -0400
15198 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
15199
15200 void tboot_shutdown(u32 shutdown_type)
15201 {
15202 - void (*shutdown)(void);
15203 + void (* __noreturn shutdown)(void);
15204
15205 if (!tboot_enabled())
15206 return;
15207 @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
15208
15209 switch_to_tboot_pt();
15210
15211 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15212 + shutdown = (void *)tboot->shutdown_entry;
15213 shutdown();
15214
15215 /* should not reach here */
15216 @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15217 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15218 }
15219
15220 -static atomic_t ap_wfs_count;
15221 +static atomic_unchecked_t ap_wfs_count;
15222
15223 static int tboot_wait_for_aps(int num_aps)
15224 {
15225 @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
15226 {
15227 switch (action) {
15228 case CPU_DYING:
15229 - atomic_inc(&ap_wfs_count);
15230 + atomic_inc_unchecked(&ap_wfs_count);
15231 if (num_online_cpus() == 1)
15232 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15233 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15234 return NOTIFY_BAD;
15235 break;
15236 }
15237 @@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
15238
15239 tboot_create_trampoline();
15240
15241 - atomic_set(&ap_wfs_count, 0);
15242 + atomic_set_unchecked(&ap_wfs_count, 0);
15243 register_hotcpu_notifier(&tboot_cpu_notifier);
15244 return 0;
15245 }
15246 diff -urNp linux-2.6.39.4/arch/x86/kernel/time.c linux-2.6.39.4/arch/x86/kernel/time.c
15247 --- linux-2.6.39.4/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
15248 +++ linux-2.6.39.4/arch/x86/kernel/time.c 2011-08-05 19:44:35.000000000 -0400
15249 @@ -22,17 +22,13 @@
15250 #include <asm/hpet.h>
15251 #include <asm/time.h>
15252
15253 -#ifdef CONFIG_X86_64
15254 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
15255 -#endif
15256 -
15257 unsigned long profile_pc(struct pt_regs *regs)
15258 {
15259 unsigned long pc = instruction_pointer(regs);
15260
15261 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15262 + if (!user_mode(regs) && in_lock_functions(pc)) {
15263 #ifdef CONFIG_FRAME_POINTER
15264 - return *(unsigned long *)(regs->bp + sizeof(long));
15265 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15266 #else
15267 unsigned long *sp =
15268 (unsigned long *)kernel_stack_pointer(regs);
15269 @@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs
15270 * or above a saved flags. Eflags has bits 22-31 zero,
15271 * kernel addresses don't.
15272 */
15273 +
15274 +#ifdef CONFIG_PAX_KERNEXEC
15275 + return ktla_ktva(sp[0]);
15276 +#else
15277 if (sp[0] >> 22)
15278 return sp[0];
15279 if (sp[1] >> 22)
15280 return sp[1];
15281 #endif
15282 +
15283 +#endif
15284 }
15285 return pc;
15286 }
15287 diff -urNp linux-2.6.39.4/arch/x86/kernel/tls.c linux-2.6.39.4/arch/x86/kernel/tls.c
15288 --- linux-2.6.39.4/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400
15289 +++ linux-2.6.39.4/arch/x86/kernel/tls.c 2011-08-05 19:44:35.000000000 -0400
15290 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15291 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15292 return -EINVAL;
15293
15294 +#ifdef CONFIG_PAX_SEGMEXEC
15295 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15296 + return -EINVAL;
15297 +#endif
15298 +
15299 set_tls_desc(p, idx, &info, 1);
15300
15301 return 0;
15302 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_32.S linux-2.6.39.4/arch/x86/kernel/trampoline_32.S
15303 --- linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400
15304 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-08-05 19:44:35.000000000 -0400
15305 @@ -32,6 +32,12 @@
15306 #include <asm/segment.h>
15307 #include <asm/page_types.h>
15308
15309 +#ifdef CONFIG_PAX_KERNEXEC
15310 +#define ta(X) (X)
15311 +#else
15312 +#define ta(X) ((X) - __PAGE_OFFSET)
15313 +#endif
15314 +
15315 #ifdef CONFIG_SMP
15316
15317 .section ".x86_trampoline","a"
15318 @@ -62,7 +68,7 @@ r_base = .
15319 inc %ax # protected mode (PE) bit
15320 lmsw %ax # into protected mode
15321 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15322 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15323 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15324
15325 # These need to be in the same 64K segment as the above;
15326 # hence we don't use the boot_gdt_descr defined in head.S
15327 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_64.S linux-2.6.39.4/arch/x86/kernel/trampoline_64.S
15328 --- linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400
15329 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-08-05 19:44:35.000000000 -0400
15330 @@ -90,7 +90,7 @@ startup_32:
15331 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15332 movl %eax, %ds
15333
15334 - movl $X86_CR4_PAE, %eax
15335 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15336 movl %eax, %cr4 # Enable PAE mode
15337
15338 # Setup trampoline 4 level pagetables
15339 @@ -138,7 +138,7 @@ tidt:
15340 # so the kernel can live anywhere
15341 .balign 4
15342 tgdt:
15343 - .short tgdt_end - tgdt # gdt limit
15344 + .short tgdt_end - tgdt - 1 # gdt limit
15345 .long tgdt - r_base
15346 .short 0
15347 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15348 diff -urNp linux-2.6.39.4/arch/x86/kernel/traps.c linux-2.6.39.4/arch/x86/kernel/traps.c
15349 --- linux-2.6.39.4/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
15350 +++ linux-2.6.39.4/arch/x86/kernel/traps.c 2011-08-05 19:44:35.000000000 -0400
15351 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15352
15353 /* Do we ignore FPU interrupts ? */
15354 char ignore_fpu_irq;
15355 -
15356 -/*
15357 - * The IDT has to be page-aligned to simplify the Pentium
15358 - * F0 0F bug workaround.
15359 - */
15360 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15361 #endif
15362
15363 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15364 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15365 }
15366
15367 static void __kprobes
15368 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15369 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15370 long error_code, siginfo_t *info)
15371 {
15372 struct task_struct *tsk = current;
15373
15374 #ifdef CONFIG_X86_32
15375 - if (regs->flags & X86_VM_MASK) {
15376 + if (v8086_mode(regs)) {
15377 /*
15378 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15379 * On nmi (interrupt 2), do_trap should not be called.
15380 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15381 }
15382 #endif
15383
15384 - if (!user_mode(regs))
15385 + if (!user_mode_novm(regs))
15386 goto kernel_trap;
15387
15388 #ifdef CONFIG_X86_32
15389 @@ -157,7 +151,7 @@ trap_signal:
15390 printk_ratelimit()) {
15391 printk(KERN_INFO
15392 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15393 - tsk->comm, tsk->pid, str,
15394 + tsk->comm, task_pid_nr(tsk), str,
15395 regs->ip, regs->sp, error_code);
15396 print_vma_addr(" in ", regs->ip);
15397 printk("\n");
15398 @@ -174,8 +168,20 @@ kernel_trap:
15399 if (!fixup_exception(regs)) {
15400 tsk->thread.error_code = error_code;
15401 tsk->thread.trap_no = trapnr;
15402 +
15403 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15404 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15405 + str = "PAX: suspicious stack segment fault";
15406 +#endif
15407 +
15408 die(str, regs, error_code);
15409 }
15410 +
15411 +#ifdef CONFIG_PAX_REFCOUNT
15412 + if (trapnr == 4)
15413 + pax_report_refcount_overflow(regs);
15414 +#endif
15415 +
15416 return;
15417
15418 #ifdef CONFIG_X86_32
15419 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15420 conditional_sti(regs);
15421
15422 #ifdef CONFIG_X86_32
15423 - if (regs->flags & X86_VM_MASK)
15424 + if (v8086_mode(regs))
15425 goto gp_in_vm86;
15426 #endif
15427
15428 tsk = current;
15429 - if (!user_mode(regs))
15430 + if (!user_mode_novm(regs))
15431 goto gp_in_kernel;
15432
15433 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15434 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15435 + struct mm_struct *mm = tsk->mm;
15436 + unsigned long limit;
15437 +
15438 + down_write(&mm->mmap_sem);
15439 + limit = mm->context.user_cs_limit;
15440 + if (limit < TASK_SIZE) {
15441 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15442 + up_write(&mm->mmap_sem);
15443 + return;
15444 + }
15445 + up_write(&mm->mmap_sem);
15446 + }
15447 +#endif
15448 +
15449 tsk->thread.error_code = error_code;
15450 tsk->thread.trap_no = 13;
15451
15452 @@ -304,6 +326,13 @@ gp_in_kernel:
15453 if (notify_die(DIE_GPF, "general protection fault", regs,
15454 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15455 return;
15456 +
15457 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15458 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15459 + die("PAX: suspicious general protection fault", regs, error_code);
15460 + else
15461 +#endif
15462 +
15463 die("general protection fault", regs, error_code);
15464 }
15465
15466 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15467 dotraplinkage notrace __kprobes void
15468 do_nmi(struct pt_regs *regs, long error_code)
15469 {
15470 +
15471 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15472 + if (!user_mode(regs)) {
15473 + unsigned long cs = regs->cs & 0xFFFF;
15474 + unsigned long ip = ktva_ktla(regs->ip);
15475 +
15476 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15477 + regs->ip = ip;
15478 + }
15479 +#endif
15480 +
15481 nmi_enter();
15482
15483 inc_irq_stat(__nmi_count);
15484 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15485 /* It's safe to allow irq's after DR6 has been saved */
15486 preempt_conditional_sti(regs);
15487
15488 - if (regs->flags & X86_VM_MASK) {
15489 + if (v8086_mode(regs)) {
15490 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15491 error_code, 1);
15492 preempt_conditional_cli(regs);
15493 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15494 * We already checked v86 mode above, so we can check for kernel mode
15495 * by just checking the CPL of CS.
15496 */
15497 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15498 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15499 tsk->thread.debugreg6 &= ~DR_STEP;
15500 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15501 regs->flags &= ~X86_EFLAGS_TF;
15502 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15503 return;
15504 conditional_sti(regs);
15505
15506 - if (!user_mode_vm(regs))
15507 + if (!user_mode(regs))
15508 {
15509 if (!fixup_exception(regs)) {
15510 task->thread.error_code = error_code;
15511 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15512 void __math_state_restore(void)
15513 {
15514 struct thread_info *thread = current_thread_info();
15515 - struct task_struct *tsk = thread->task;
15516 + struct task_struct *tsk = current;
15517
15518 /*
15519 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15520 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15521 */
15522 asmlinkage void math_state_restore(void)
15523 {
15524 - struct thread_info *thread = current_thread_info();
15525 - struct task_struct *tsk = thread->task;
15526 + struct task_struct *tsk = current;
15527
15528 if (!tsk_used_math(tsk)) {
15529 local_irq_enable();
15530 diff -urNp linux-2.6.39.4/arch/x86/kernel/verify_cpu.S linux-2.6.39.4/arch/x86/kernel/verify_cpu.S
15531 --- linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-05-19 00:06:34.000000000 -0400
15532 +++ linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-08-05 19:44:35.000000000 -0400
15533 @@ -20,6 +20,7 @@
15534 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15535 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15536 * arch/x86/kernel/head_32.S: processor startup
15537 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15538 *
15539 * verify_cpu, returns the status of longmode and SSE in register %eax.
15540 * 0: Success 1: Failure
15541 diff -urNp linux-2.6.39.4/arch/x86/kernel/vm86_32.c linux-2.6.39.4/arch/x86/kernel/vm86_32.c
15542 --- linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400
15543 +++ linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-08-05 19:44:35.000000000 -0400
15544 @@ -41,6 +41,7 @@
15545 #include <linux/ptrace.h>
15546 #include <linux/audit.h>
15547 #include <linux/stddef.h>
15548 +#include <linux/grsecurity.h>
15549
15550 #include <asm/uaccess.h>
15551 #include <asm/io.h>
15552 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15553 do_exit(SIGSEGV);
15554 }
15555
15556 - tss = &per_cpu(init_tss, get_cpu());
15557 + tss = init_tss + get_cpu();
15558 current->thread.sp0 = current->thread.saved_sp0;
15559 current->thread.sysenter_cs = __KERNEL_CS;
15560 load_sp0(tss, &current->thread);
15561 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15562 struct task_struct *tsk;
15563 int tmp, ret = -EPERM;
15564
15565 +#ifdef CONFIG_GRKERNSEC_VM86
15566 + if (!capable(CAP_SYS_RAWIO)) {
15567 + gr_handle_vm86();
15568 + goto out;
15569 + }
15570 +#endif
15571 +
15572 tsk = current;
15573 if (tsk->thread.saved_sp0)
15574 goto out;
15575 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15576 int tmp, ret;
15577 struct vm86plus_struct __user *v86;
15578
15579 +#ifdef CONFIG_GRKERNSEC_VM86
15580 + if (!capable(CAP_SYS_RAWIO)) {
15581 + gr_handle_vm86();
15582 + ret = -EPERM;
15583 + goto out;
15584 + }
15585 +#endif
15586 +
15587 tsk = current;
15588 switch (cmd) {
15589 case VM86_REQUEST_IRQ:
15590 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15591 tsk->thread.saved_fs = info->regs32->fs;
15592 tsk->thread.saved_gs = get_user_gs(info->regs32);
15593
15594 - tss = &per_cpu(init_tss, get_cpu());
15595 + tss = init_tss + get_cpu();
15596 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15597 if (cpu_has_sep)
15598 tsk->thread.sysenter_cs = 0;
15599 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15600 goto cannot_handle;
15601 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15602 goto cannot_handle;
15603 - intr_ptr = (unsigned long __user *) (i << 2);
15604 + intr_ptr = (__force unsigned long __user *) (i << 2);
15605 if (get_user(segoffs, intr_ptr))
15606 goto cannot_handle;
15607 if ((segoffs >> 16) == BIOSSEG)
15608 diff -urNp linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S
15609 --- linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
15610 +++ linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-08-05 19:44:35.000000000 -0400
15611 @@ -26,6 +26,13 @@
15612 #include <asm/page_types.h>
15613 #include <asm/cache.h>
15614 #include <asm/boot.h>
15615 +#include <asm/segment.h>
15616 +
15617 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15618 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15619 +#else
15620 +#define __KERNEL_TEXT_OFFSET 0
15621 +#endif
15622
15623 #undef i386 /* in case the preprocessor is a 32bit one */
15624
15625 @@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
15626 #ifdef CONFIG_X86_32
15627 OUTPUT_ARCH(i386)
15628 ENTRY(phys_startup_32)
15629 -jiffies = jiffies_64;
15630 #else
15631 OUTPUT_ARCH(i386:x86-64)
15632 ENTRY(phys_startup_64)
15633 -jiffies_64 = jiffies;
15634 #endif
15635
15636 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
15637 @@ -69,31 +74,46 @@ jiffies_64 = jiffies;
15638
15639 PHDRS {
15640 text PT_LOAD FLAGS(5); /* R_E */
15641 +#ifdef CONFIG_X86_32
15642 + module PT_LOAD FLAGS(5); /* R_E */
15643 +#endif
15644 +#ifdef CONFIG_XEN
15645 + rodata PT_LOAD FLAGS(5); /* R_E */
15646 +#else
15647 + rodata PT_LOAD FLAGS(4); /* R__ */
15648 +#endif
15649 data PT_LOAD FLAGS(6); /* RW_ */
15650 #ifdef CONFIG_X86_64
15651 user PT_LOAD FLAGS(5); /* R_E */
15652 +#endif
15653 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15654 #ifdef CONFIG_SMP
15655 percpu PT_LOAD FLAGS(6); /* RW_ */
15656 #endif
15657 + text.init PT_LOAD FLAGS(5); /* R_E */
15658 + text.exit PT_LOAD FLAGS(5); /* R_E */
15659 init PT_LOAD FLAGS(7); /* RWE */
15660 -#endif
15661 note PT_NOTE FLAGS(0); /* ___ */
15662 }
15663
15664 SECTIONS
15665 {
15666 #ifdef CONFIG_X86_32
15667 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15668 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15669 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15670 #else
15671 - . = __START_KERNEL;
15672 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15673 + . = __START_KERNEL;
15674 #endif
15675
15676 /* Text and read-only data */
15677 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15678 - _text = .;
15679 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15680 /* bootstrapping code */
15681 +#ifdef CONFIG_X86_32
15682 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15683 +#else
15684 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15685 +#endif
15686 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15687 + _text = .;
15688 HEAD_TEXT
15689 #ifdef CONFIG_X86_32
15690 . = ALIGN(PAGE_SIZE);
15691 @@ -109,13 +129,47 @@ SECTIONS
15692 IRQENTRY_TEXT
15693 *(.fixup)
15694 *(.gnu.warning)
15695 - /* End of text section */
15696 - _etext = .;
15697 } :text = 0x9090
15698
15699 - NOTES :text :note
15700 + . += __KERNEL_TEXT_OFFSET;
15701 +
15702 +#ifdef CONFIG_X86_32
15703 + . = ALIGN(PAGE_SIZE);
15704 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15705 +
15706 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15707 + MODULES_EXEC_VADDR = .;
15708 + BYTE(0)
15709 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15710 + . = ALIGN(HPAGE_SIZE);
15711 + MODULES_EXEC_END = . - 1;
15712 +#endif
15713 +
15714 + } :module
15715 +#endif
15716 +
15717 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15718 + /* End of text section */
15719 + _etext = . - __KERNEL_TEXT_OFFSET;
15720 + }
15721
15722 - EXCEPTION_TABLE(16) :text = 0x9090
15723 +#ifdef CONFIG_X86_32
15724 + . = ALIGN(PAGE_SIZE);
15725 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15726 + *(.idt)
15727 + . = ALIGN(PAGE_SIZE);
15728 + *(.empty_zero_page)
15729 + *(.initial_pg_fixmap)
15730 + *(.initial_pg_pmd)
15731 + *(.initial_page_table)
15732 + *(.swapper_pg_dir)
15733 + } :rodata
15734 +#endif
15735 +
15736 + . = ALIGN(PAGE_SIZE);
15737 + NOTES :rodata :note
15738 +
15739 + EXCEPTION_TABLE(16) :rodata
15740
15741 #if defined(CONFIG_DEBUG_RODATA)
15742 /* .text should occupy whole number of pages */
15743 @@ -127,16 +181,20 @@ SECTIONS
15744
15745 /* Data */
15746 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15747 +
15748 +#ifdef CONFIG_PAX_KERNEXEC
15749 + . = ALIGN(HPAGE_SIZE);
15750 +#else
15751 + . = ALIGN(PAGE_SIZE);
15752 +#endif
15753 +
15754 /* Start of data section */
15755 _sdata = .;
15756
15757 /* init_task */
15758 INIT_TASK_DATA(THREAD_SIZE)
15759
15760 -#ifdef CONFIG_X86_32
15761 - /* 32 bit has nosave before _edata */
15762 NOSAVE_DATA
15763 -#endif
15764
15765 PAGE_ALIGNED_DATA(PAGE_SIZE)
15766
15767 @@ -145,6 +203,8 @@ SECTIONS
15768 DATA_DATA
15769 CONSTRUCTORS
15770
15771 + jiffies = jiffies_64;
15772 +
15773 /* rarely changed data like cpu maps */
15774 READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
15775
15776 @@ -199,12 +259,6 @@ SECTIONS
15777 }
15778 vgetcpu_mode = VVIRT(.vgetcpu_mode);
15779
15780 - . = ALIGN(L1_CACHE_BYTES);
15781 - .jiffies : AT(VLOAD(.jiffies)) {
15782 - *(.jiffies)
15783 - }
15784 - jiffies = VVIRT(.jiffies);
15785 -
15786 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
15787 *(.vsyscall_3)
15788 }
15789 @@ -220,12 +274,19 @@ SECTIONS
15790 #endif /* CONFIG_X86_64 */
15791
15792 /* Init code and data - will be freed after init */
15793 - . = ALIGN(PAGE_SIZE);
15794 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15795 + BYTE(0)
15796 +
15797 +#ifdef CONFIG_PAX_KERNEXEC
15798 + . = ALIGN(HPAGE_SIZE);
15799 +#else
15800 + . = ALIGN(PAGE_SIZE);
15801 +#endif
15802 +
15803 __init_begin = .; /* paired with __init_end */
15804 - }
15805 + } :init.begin
15806
15807 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15808 +#ifdef CONFIG_SMP
15809 /*
15810 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15811 * output PHDR, so the next output section - .init.text - should
15812 @@ -234,12 +295,27 @@ SECTIONS
15813 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15814 #endif
15815
15816 - INIT_TEXT_SECTION(PAGE_SIZE)
15817 -#ifdef CONFIG_X86_64
15818 - :init
15819 -#endif
15820 + . = ALIGN(PAGE_SIZE);
15821 + init_begin = .;
15822 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15823 + VMLINUX_SYMBOL(_sinittext) = .;
15824 + INIT_TEXT
15825 + VMLINUX_SYMBOL(_einittext) = .;
15826 + . = ALIGN(PAGE_SIZE);
15827 + } :text.init
15828
15829 - INIT_DATA_SECTION(16)
15830 + /*
15831 + * .exit.text is discard at runtime, not link time, to deal with
15832 + * references from .altinstructions and .eh_frame
15833 + */
15834 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15835 + EXIT_TEXT
15836 + . = ALIGN(16);
15837 + } :text.exit
15838 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15839 +
15840 + . = ALIGN(PAGE_SIZE);
15841 + INIT_DATA_SECTION(16) :init
15842
15843 /*
15844 * Code and data for a variety of lowlevel trampolines, to be
15845 @@ -306,19 +382,12 @@ SECTIONS
15846 }
15847
15848 . = ALIGN(8);
15849 - /*
15850 - * .exit.text is discard at runtime, not link time, to deal with
15851 - * references from .altinstructions and .eh_frame
15852 - */
15853 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15854 - EXIT_TEXT
15855 - }
15856
15857 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15858 EXIT_DATA
15859 }
15860
15861 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15862 +#ifndef CONFIG_SMP
15863 PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
15864 #endif
15865
15866 @@ -337,16 +406,10 @@ SECTIONS
15867 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15868 __smp_locks = .;
15869 *(.smp_locks)
15870 - . = ALIGN(PAGE_SIZE);
15871 __smp_locks_end = .;
15872 + . = ALIGN(PAGE_SIZE);
15873 }
15874
15875 -#ifdef CONFIG_X86_64
15876 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15877 - NOSAVE_DATA
15878 - }
15879 -#endif
15880 -
15881 /* BSS */
15882 . = ALIGN(PAGE_SIZE);
15883 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15884 @@ -362,6 +425,7 @@ SECTIONS
15885 __brk_base = .;
15886 . += 64 * 1024; /* 64k alignment slop space */
15887 *(.brk_reservation) /* areas brk users have reserved */
15888 + . = ALIGN(HPAGE_SIZE);
15889 __brk_limit = .;
15890 }
15891
15892 @@ -388,13 +452,12 @@ SECTIONS
15893 * for the boot processor.
15894 */
15895 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15896 -INIT_PER_CPU(gdt_page);
15897 INIT_PER_CPU(irq_stack_union);
15898
15899 /*
15900 * Build-time check on the image size:
15901 */
15902 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15903 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15904 "kernel image bigger than KERNEL_IMAGE_SIZE");
15905
15906 #ifdef CONFIG_SMP
15907 diff -urNp linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c
15908 --- linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400
15909 +++ linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-08-05 19:44:35.000000000 -0400
15910 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
15911
15912 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
15913 /* copy vsyscall data */
15914 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
15915 vsyscall_gtod_data.clock.vread = clock->vread;
15916 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
15917 vsyscall_gtod_data.clock.mask = clock->mask;
15918 @@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
15919 We do this here because otherwise user space would do it on
15920 its own in a likely inferior way (no access to jiffies).
15921 If you don't like it pass NULL. */
15922 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
15923 + if (tcache && tcache->blob[0] == (j = jiffies)) {
15924 p = tcache->blob[1];
15925 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
15926 /* Load per CPU data from RDTSCP */
15927 diff -urNp linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c
15928 --- linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400
15929 +++ linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-05 19:44:35.000000000 -0400
15930 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15931 EXPORT_SYMBOL(copy_user_generic_string);
15932 EXPORT_SYMBOL(copy_user_generic_unrolled);
15933 EXPORT_SYMBOL(__copy_user_nocache);
15934 -EXPORT_SYMBOL(_copy_from_user);
15935 -EXPORT_SYMBOL(_copy_to_user);
15936
15937 EXPORT_SYMBOL(copy_page);
15938 EXPORT_SYMBOL(clear_page);
15939 diff -urNp linux-2.6.39.4/arch/x86/kernel/xsave.c linux-2.6.39.4/arch/x86/kernel/xsave.c
15940 --- linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400
15941 +++ linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-08-05 19:44:35.000000000 -0400
15942 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15943 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15944 return -EINVAL;
15945
15946 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15947 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15948 fx_sw_user->extended_size -
15949 FP_XSTATE_MAGIC2_SIZE));
15950 if (err)
15951 @@ -267,7 +267,7 @@ fx_only:
15952 * the other extended state.
15953 */
15954 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15955 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15956 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15957 }
15958
15959 /*
15960 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15961 if (use_xsave())
15962 err = restore_user_xstate(buf);
15963 else
15964 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15965 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15966 buf);
15967 if (unlikely(err)) {
15968 /*
15969 diff -urNp linux-2.6.39.4/arch/x86/kvm/emulate.c linux-2.6.39.4/arch/x86/kvm/emulate.c
15970 --- linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400
15971 +++ linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-08-05 19:44:35.000000000 -0400
15972 @@ -89,7 +89,7 @@
15973 #define Src2ImmByte (2<<29)
15974 #define Src2One (3<<29)
15975 #define Src2Imm (4<<29)
15976 -#define Src2Mask (7<<29)
15977 +#define Src2Mask (7U<<29)
15978
15979 #define X2(x...) x, x
15980 #define X3(x...) X2(x), x
15981 @@ -190,6 +190,7 @@ struct group_dual {
15982
15983 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
15984 do { \
15985 + unsigned long _tmp; \
15986 __asm__ __volatile__ ( \
15987 _PRE_EFLAGS("0", "4", "2") \
15988 _op _suffix " %"_x"3,%1; " \
15989 @@ -203,8 +204,6 @@ struct group_dual {
15990 /* Raw emulation: instruction has two explicit operands. */
15991 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15992 do { \
15993 - unsigned long _tmp; \
15994 - \
15995 switch ((_dst).bytes) { \
15996 case 2: \
15997 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15998 @@ -220,7 +219,6 @@ struct group_dual {
15999
16000 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16001 do { \
16002 - unsigned long _tmp; \
16003 switch ((_dst).bytes) { \
16004 case 1: \
16005 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16006 diff -urNp linux-2.6.39.4/arch/x86/kvm/lapic.c linux-2.6.39.4/arch/x86/kvm/lapic.c
16007 --- linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400
16008 +++ linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-08-05 19:44:35.000000000 -0400
16009 @@ -53,7 +53,7 @@
16010 #define APIC_BUS_CYCLE_NS 1
16011
16012 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16013 -#define apic_debug(fmt, arg...)
16014 +#define apic_debug(fmt, arg...) do {} while (0)
16015
16016 #define APIC_LVT_NUM 6
16017 /* 14 is the version for Xeon and Pentium 8.4.8*/
16018 diff -urNp linux-2.6.39.4/arch/x86/kvm/mmu.c linux-2.6.39.4/arch/x86/kvm/mmu.c
16019 --- linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400
16020 +++ linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-08-05 19:44:35.000000000 -0400
16021 @@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16022
16023 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16024
16025 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16026 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16027
16028 /*
16029 * Assume that the pte write on a page table of the same type
16030 @@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16031 smp_rmb();
16032
16033 spin_lock(&vcpu->kvm->mmu_lock);
16034 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16035 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16036 gentry = 0;
16037 kvm_mmu_free_some_pages(vcpu);
16038 ++vcpu->kvm->stat.mmu_pte_write;
16039 diff -urNp linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h
16040 --- linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400
16041 +++ linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-08-05 19:44:35.000000000 -0400
16042 @@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_
16043 unsigned long mmu_seq;
16044 bool map_writable;
16045
16046 + pax_track_stack();
16047 +
16048 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16049
16050 r = mmu_topup_memory_caches(vcpu);
16051 @@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16052 if (need_flush)
16053 kvm_flush_remote_tlbs(vcpu->kvm);
16054
16055 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16056 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16057
16058 spin_unlock(&vcpu->kvm->mmu_lock);
16059
16060 diff -urNp linux-2.6.39.4/arch/x86/kvm/svm.c linux-2.6.39.4/arch/x86/kvm/svm.c
16061 --- linux-2.6.39.4/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400
16062 +++ linux-2.6.39.4/arch/x86/kvm/svm.c 2011-08-05 20:34:06.000000000 -0400
16063 @@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu *
16064 int cpu = raw_smp_processor_id();
16065
16066 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16067 +
16068 + pax_open_kernel();
16069 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16070 + pax_close_kernel();
16071 +
16072 load_TR_desc();
16073 }
16074
16075 @@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16076 #endif
16077 #endif
16078
16079 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16080 + __set_fs(current_thread_info()->addr_limit);
16081 +#endif
16082 +
16083 reload_tss(vcpu);
16084
16085 local_irq_disable();
16086 diff -urNp linux-2.6.39.4/arch/x86/kvm/vmx.c linux-2.6.39.4/arch/x86/kvm/vmx.c
16087 --- linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400
16088 +++ linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-08-05 20:34:06.000000000 -0400
16089 @@ -725,7 +725,11 @@ static void reload_tss(void)
16090 struct desc_struct *descs;
16091
16092 descs = (void *)gdt->address;
16093 +
16094 + pax_open_kernel();
16095 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16096 + pax_close_kernel();
16097 +
16098 load_TR_desc();
16099 }
16100
16101 @@ -1648,8 +1652,11 @@ static __init int hardware_setup(void)
16102 if (!cpu_has_vmx_flexpriority())
16103 flexpriority_enabled = 0;
16104
16105 - if (!cpu_has_vmx_tpr_shadow())
16106 - kvm_x86_ops->update_cr8_intercept = NULL;
16107 + if (!cpu_has_vmx_tpr_shadow()) {
16108 + pax_open_kernel();
16109 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16110 + pax_close_kernel();
16111 + }
16112
16113 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16114 kvm_disable_largepages();
16115 @@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16116 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16117
16118 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16119 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16120 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16121 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16122 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16123 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16124 @@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc
16125 "jmp .Lkvm_vmx_return \n\t"
16126 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16127 ".Lkvm_vmx_return: "
16128 +
16129 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16130 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16131 + ".Lkvm_vmx_return2: "
16132 +#endif
16133 +
16134 /* Save guest registers, load host registers, keep flags */
16135 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16136 "pop %0 \n\t"
16137 @@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc
16138 #endif
16139 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16140 [wordsize]"i"(sizeof(ulong))
16141 +
16142 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16143 + ,[cs]"i"(__KERNEL_CS)
16144 +#endif
16145 +
16146 : "cc", "memory"
16147 , R"ax", R"bx", R"di", R"si"
16148 #ifdef CONFIG_X86_64
16149 @@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc
16150
16151 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16152
16153 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16154 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16155 +
16156 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16157 + loadsegment(fs, __KERNEL_PERCPU);
16158 +#endif
16159 +
16160 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16161 + __set_fs(current_thread_info()->addr_limit);
16162 +#endif
16163 +
16164 vmx->launched = 1;
16165
16166 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16167 diff -urNp linux-2.6.39.4/arch/x86/kvm/x86.c linux-2.6.39.4/arch/x86/kvm/x86.c
16168 --- linux-2.6.39.4/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400
16169 +++ linux-2.6.39.4/arch/x86/kvm/x86.c 2011-08-05 20:34:06.000000000 -0400
16170 @@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16171 if (n < msr_list.nmsrs)
16172 goto out;
16173 r = -EFAULT;
16174 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16175 + goto out;
16176 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16177 num_msrs_to_save * sizeof(u32)))
16178 goto out;
16179 @@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16180 struct kvm_cpuid2 *cpuid,
16181 struct kvm_cpuid_entry2 __user *entries)
16182 {
16183 - int r;
16184 + int r, i;
16185
16186 r = -E2BIG;
16187 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16188 goto out;
16189 r = -EFAULT;
16190 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16191 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16192 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16193 goto out;
16194 + for (i = 0; i < cpuid->nent; ++i) {
16195 + struct kvm_cpuid_entry2 cpuid_entry;
16196 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16197 + goto out;
16198 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16199 + }
16200 vcpu->arch.cpuid_nent = cpuid->nent;
16201 kvm_apic_set_version(vcpu);
16202 kvm_x86_ops->cpuid_update(vcpu);
16203 @@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16204 struct kvm_cpuid2 *cpuid,
16205 struct kvm_cpuid_entry2 __user *entries)
16206 {
16207 - int r;
16208 + int r, i;
16209
16210 r = -E2BIG;
16211 if (cpuid->nent < vcpu->arch.cpuid_nent)
16212 goto out;
16213 r = -EFAULT;
16214 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16215 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16216 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16217 goto out;
16218 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16219 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16220 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16221 + goto out;
16222 + }
16223 return 0;
16224
16225 out:
16226 @@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16227 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16228 struct kvm_interrupt *irq)
16229 {
16230 - if (irq->irq < 0 || irq->irq >= 256)
16231 + if (irq->irq >= 256)
16232 return -EINVAL;
16233 if (irqchip_in_kernel(vcpu->kvm))
16234 return -ENXIO;
16235 @@ -4690,7 +4701,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16236 }
16237 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16238
16239 -int kvm_arch_init(void *opaque)
16240 +int kvm_arch_init(const void *opaque)
16241 {
16242 int r;
16243 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16244 diff -urNp linux-2.6.39.4/arch/x86/lguest/boot.c linux-2.6.39.4/arch/x86/lguest/boot.c
16245 --- linux-2.6.39.4/arch/x86/lguest/boot.c 2011-06-25 12:55:22.000000000 -0400
16246 +++ linux-2.6.39.4/arch/x86/lguest/boot.c 2011-08-05 20:34:06.000000000 -0400
16247 @@ -1178,9 +1178,10 @@ static __init int early_put_chars(u32 vt
16248 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16249 * Launcher to reboot us.
16250 */
16251 -static void lguest_restart(char *reason)
16252 +static __noreturn void lguest_restart(char *reason)
16253 {
16254 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16255 + BUG();
16256 }
16257
16258 /*G:050
16259 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_32.c linux-2.6.39.4/arch/x86/lib/atomic64_32.c
16260 --- linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400
16261 +++ linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-08-05 19:44:35.000000000 -0400
16262 @@ -8,18 +8,30 @@
16263
16264 long long atomic64_read_cx8(long long, const atomic64_t *v);
16265 EXPORT_SYMBOL(atomic64_read_cx8);
16266 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16267 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16268 long long atomic64_set_cx8(long long, const atomic64_t *v);
16269 EXPORT_SYMBOL(atomic64_set_cx8);
16270 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16271 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16272 long long atomic64_xchg_cx8(long long, unsigned high);
16273 EXPORT_SYMBOL(atomic64_xchg_cx8);
16274 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16275 EXPORT_SYMBOL(atomic64_add_return_cx8);
16276 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16277 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16278 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16279 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16280 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16281 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16282 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16283 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16284 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16285 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16286 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16287 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16288 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16289 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16290 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16291 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16292 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16293 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16294 #ifndef CONFIG_X86_CMPXCHG64
16295 long long atomic64_read_386(long long, const atomic64_t *v);
16296 EXPORT_SYMBOL(atomic64_read_386);
16297 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16298 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16299 long long atomic64_set_386(long long, const atomic64_t *v);
16300 EXPORT_SYMBOL(atomic64_set_386);
16301 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16302 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16303 long long atomic64_xchg_386(long long, unsigned high);
16304 EXPORT_SYMBOL(atomic64_xchg_386);
16305 long long atomic64_add_return_386(long long a, atomic64_t *v);
16306 EXPORT_SYMBOL(atomic64_add_return_386);
16307 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16308 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16309 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16310 EXPORT_SYMBOL(atomic64_sub_return_386);
16311 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16312 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16313 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16314 EXPORT_SYMBOL(atomic64_inc_return_386);
16315 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16316 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16317 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16318 EXPORT_SYMBOL(atomic64_dec_return_386);
16319 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16320 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16321 long long atomic64_add_386(long long a, atomic64_t *v);
16322 EXPORT_SYMBOL(atomic64_add_386);
16323 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16324 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16325 long long atomic64_sub_386(long long a, atomic64_t *v);
16326 EXPORT_SYMBOL(atomic64_sub_386);
16327 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16328 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16329 long long atomic64_inc_386(long long a, atomic64_t *v);
16330 EXPORT_SYMBOL(atomic64_inc_386);
16331 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16332 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16333 long long atomic64_dec_386(long long a, atomic64_t *v);
16334 EXPORT_SYMBOL(atomic64_dec_386);
16335 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16336 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16337 long long atomic64_dec_if_positive_386(atomic64_t *v);
16338 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16339 int atomic64_inc_not_zero_386(atomic64_t *v);
16340 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S
16341 --- linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400
16342 +++ linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-08-05 19:44:35.000000000 -0400
16343 @@ -48,6 +48,10 @@ BEGIN(read)
16344 movl (v), %eax
16345 movl 4(v), %edx
16346 RET_ENDP
16347 +BEGIN(read_unchecked)
16348 + movl (v), %eax
16349 + movl 4(v), %edx
16350 +RET_ENDP
16351 #undef v
16352
16353 #define v %esi
16354 @@ -55,6 +59,10 @@ BEGIN(set)
16355 movl %ebx, (v)
16356 movl %ecx, 4(v)
16357 RET_ENDP
16358 +BEGIN(set_unchecked)
16359 + movl %ebx, (v)
16360 + movl %ecx, 4(v)
16361 +RET_ENDP
16362 #undef v
16363
16364 #define v %esi
16365 @@ -70,6 +78,20 @@ RET_ENDP
16366 BEGIN(add)
16367 addl %eax, (v)
16368 adcl %edx, 4(v)
16369 +
16370 +#ifdef CONFIG_PAX_REFCOUNT
16371 + jno 0f
16372 + subl %eax, (v)
16373 + sbbl %edx, 4(v)
16374 + int $4
16375 +0:
16376 + _ASM_EXTABLE(0b, 0b)
16377 +#endif
16378 +
16379 +RET_ENDP
16380 +BEGIN(add_unchecked)
16381 + addl %eax, (v)
16382 + adcl %edx, 4(v)
16383 RET_ENDP
16384 #undef v
16385
16386 @@ -77,6 +99,24 @@ RET_ENDP
16387 BEGIN(add_return)
16388 addl (v), %eax
16389 adcl 4(v), %edx
16390 +
16391 +#ifdef CONFIG_PAX_REFCOUNT
16392 + into
16393 +1234:
16394 + _ASM_EXTABLE(1234b, 2f)
16395 +#endif
16396 +
16397 + movl %eax, (v)
16398 + movl %edx, 4(v)
16399 +
16400 +#ifdef CONFIG_PAX_REFCOUNT
16401 +2:
16402 +#endif
16403 +
16404 +RET_ENDP
16405 +BEGIN(add_return_unchecked)
16406 + addl (v), %eax
16407 + adcl 4(v), %edx
16408 movl %eax, (v)
16409 movl %edx, 4(v)
16410 RET_ENDP
16411 @@ -86,6 +126,20 @@ RET_ENDP
16412 BEGIN(sub)
16413 subl %eax, (v)
16414 sbbl %edx, 4(v)
16415 +
16416 +#ifdef CONFIG_PAX_REFCOUNT
16417 + jno 0f
16418 + addl %eax, (v)
16419 + adcl %edx, 4(v)
16420 + int $4
16421 +0:
16422 + _ASM_EXTABLE(0b, 0b)
16423 +#endif
16424 +
16425 +RET_ENDP
16426 +BEGIN(sub_unchecked)
16427 + subl %eax, (v)
16428 + sbbl %edx, 4(v)
16429 RET_ENDP
16430 #undef v
16431
16432 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16433 sbbl $0, %edx
16434 addl (v), %eax
16435 adcl 4(v), %edx
16436 +
16437 +#ifdef CONFIG_PAX_REFCOUNT
16438 + into
16439 +1234:
16440 + _ASM_EXTABLE(1234b, 2f)
16441 +#endif
16442 +
16443 + movl %eax, (v)
16444 + movl %edx, 4(v)
16445 +
16446 +#ifdef CONFIG_PAX_REFCOUNT
16447 +2:
16448 +#endif
16449 +
16450 +RET_ENDP
16451 +BEGIN(sub_return_unchecked)
16452 + negl %edx
16453 + negl %eax
16454 + sbbl $0, %edx
16455 + addl (v), %eax
16456 + adcl 4(v), %edx
16457 movl %eax, (v)
16458 movl %edx, 4(v)
16459 RET_ENDP
16460 @@ -105,6 +180,20 @@ RET_ENDP
16461 BEGIN(inc)
16462 addl $1, (v)
16463 adcl $0, 4(v)
16464 +
16465 +#ifdef CONFIG_PAX_REFCOUNT
16466 + jno 0f
16467 + subl $1, (v)
16468 + sbbl $0, 4(v)
16469 + int $4
16470 +0:
16471 + _ASM_EXTABLE(0b, 0b)
16472 +#endif
16473 +
16474 +RET_ENDP
16475 +BEGIN(inc_unchecked)
16476 + addl $1, (v)
16477 + adcl $0, 4(v)
16478 RET_ENDP
16479 #undef v
16480
16481 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16482 movl 4(v), %edx
16483 addl $1, %eax
16484 adcl $0, %edx
16485 +
16486 +#ifdef CONFIG_PAX_REFCOUNT
16487 + into
16488 +1234:
16489 + _ASM_EXTABLE(1234b, 2f)
16490 +#endif
16491 +
16492 + movl %eax, (v)
16493 + movl %edx, 4(v)
16494 +
16495 +#ifdef CONFIG_PAX_REFCOUNT
16496 +2:
16497 +#endif
16498 +
16499 +RET_ENDP
16500 +BEGIN(inc_return_unchecked)
16501 + movl (v), %eax
16502 + movl 4(v), %edx
16503 + addl $1, %eax
16504 + adcl $0, %edx
16505 movl %eax, (v)
16506 movl %edx, 4(v)
16507 RET_ENDP
16508 @@ -123,6 +232,20 @@ RET_ENDP
16509 BEGIN(dec)
16510 subl $1, (v)
16511 sbbl $0, 4(v)
16512 +
16513 +#ifdef CONFIG_PAX_REFCOUNT
16514 + jno 0f
16515 + addl $1, (v)
16516 + adcl $0, 4(v)
16517 + int $4
16518 +0:
16519 + _ASM_EXTABLE(0b, 0b)
16520 +#endif
16521 +
16522 +RET_ENDP
16523 +BEGIN(dec_unchecked)
16524 + subl $1, (v)
16525 + sbbl $0, 4(v)
16526 RET_ENDP
16527 #undef v
16528
16529 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16530 movl 4(v), %edx
16531 subl $1, %eax
16532 sbbl $0, %edx
16533 +
16534 +#ifdef CONFIG_PAX_REFCOUNT
16535 + into
16536 +1234:
16537 + _ASM_EXTABLE(1234b, 2f)
16538 +#endif
16539 +
16540 + movl %eax, (v)
16541 + movl %edx, 4(v)
16542 +
16543 +#ifdef CONFIG_PAX_REFCOUNT
16544 +2:
16545 +#endif
16546 +
16547 +RET_ENDP
16548 +BEGIN(dec_return_unchecked)
16549 + movl (v), %eax
16550 + movl 4(v), %edx
16551 + subl $1, %eax
16552 + sbbl $0, %edx
16553 movl %eax, (v)
16554 movl %edx, 4(v)
16555 RET_ENDP
16556 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16557 adcl %edx, %edi
16558 addl (v), %eax
16559 adcl 4(v), %edx
16560 +
16561 +#ifdef CONFIG_PAX_REFCOUNT
16562 + into
16563 +1234:
16564 + _ASM_EXTABLE(1234b, 2f)
16565 +#endif
16566 +
16567 cmpl %eax, %esi
16568 je 3f
16569 1:
16570 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16571 1:
16572 addl $1, %eax
16573 adcl $0, %edx
16574 +
16575 +#ifdef CONFIG_PAX_REFCOUNT
16576 + into
16577 +1234:
16578 + _ASM_EXTABLE(1234b, 2f)
16579 +#endif
16580 +
16581 movl %eax, (v)
16582 movl %edx, 4(v)
16583 movl $1, %eax
16584 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16585 movl 4(v), %edx
16586 subl $1, %eax
16587 sbbl $0, %edx
16588 +
16589 +#ifdef CONFIG_PAX_REFCOUNT
16590 + into
16591 +1234:
16592 + _ASM_EXTABLE(1234b, 1f)
16593 +#endif
16594 +
16595 js 1f
16596 movl %eax, (v)
16597 movl %edx, 4(v)
16598 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S
16599 --- linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400
16600 +++ linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-05 19:44:35.000000000 -0400
16601 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16602 CFI_ENDPROC
16603 ENDPROC(atomic64_read_cx8)
16604
16605 +ENTRY(atomic64_read_unchecked_cx8)
16606 + CFI_STARTPROC
16607 +
16608 + read64 %ecx
16609 + ret
16610 + CFI_ENDPROC
16611 +ENDPROC(atomic64_read_unchecked_cx8)
16612 +
16613 ENTRY(atomic64_set_cx8)
16614 CFI_STARTPROC
16615
16616 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16617 CFI_ENDPROC
16618 ENDPROC(atomic64_set_cx8)
16619
16620 +ENTRY(atomic64_set_unchecked_cx8)
16621 + CFI_STARTPROC
16622 +
16623 +1:
16624 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16625 + * are atomic on 586 and newer */
16626 + cmpxchg8b (%esi)
16627 + jne 1b
16628 +
16629 + ret
16630 + CFI_ENDPROC
16631 +ENDPROC(atomic64_set_unchecked_cx8)
16632 +
16633 ENTRY(atomic64_xchg_cx8)
16634 CFI_STARTPROC
16635
16636 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16637 CFI_ENDPROC
16638 ENDPROC(atomic64_xchg_cx8)
16639
16640 -.macro addsub_return func ins insc
16641 -ENTRY(atomic64_\func\()_return_cx8)
16642 +.macro addsub_return func ins insc unchecked=""
16643 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16644 CFI_STARTPROC
16645 SAVE ebp
16646 SAVE ebx
16647 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16648 movl %edx, %ecx
16649 \ins\()l %esi, %ebx
16650 \insc\()l %edi, %ecx
16651 +
16652 +.ifb \unchecked
16653 +#ifdef CONFIG_PAX_REFCOUNT
16654 + into
16655 +2:
16656 + _ASM_EXTABLE(2b, 3f)
16657 +#endif
16658 +.endif
16659 +
16660 LOCK_PREFIX
16661 cmpxchg8b (%ebp)
16662 jne 1b
16663 -
16664 -10:
16665 movl %ebx, %eax
16666 movl %ecx, %edx
16667 +
16668 +.ifb \unchecked
16669 +#ifdef CONFIG_PAX_REFCOUNT
16670 +3:
16671 +#endif
16672 +.endif
16673 +
16674 RESTORE edi
16675 RESTORE esi
16676 RESTORE ebx
16677 RESTORE ebp
16678 ret
16679 CFI_ENDPROC
16680 -ENDPROC(atomic64_\func\()_return_cx8)
16681 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16682 .endm
16683
16684 addsub_return add add adc
16685 addsub_return sub sub sbb
16686 +addsub_return add add adc _unchecked
16687 +addsub_return sub sub sbb _unchecked
16688
16689 -.macro incdec_return func ins insc
16690 -ENTRY(atomic64_\func\()_return_cx8)
16691 +.macro incdec_return func ins insc unchecked
16692 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16693 CFI_STARTPROC
16694 SAVE ebx
16695
16696 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16697 movl %edx, %ecx
16698 \ins\()l $1, %ebx
16699 \insc\()l $0, %ecx
16700 +
16701 +.ifb \unchecked
16702 +#ifdef CONFIG_PAX_REFCOUNT
16703 + into
16704 +2:
16705 + _ASM_EXTABLE(2b, 3f)
16706 +#endif
16707 +.endif
16708 +
16709 LOCK_PREFIX
16710 cmpxchg8b (%esi)
16711 jne 1b
16712
16713 -10:
16714 movl %ebx, %eax
16715 movl %ecx, %edx
16716 +
16717 +.ifb \unchecked
16718 +#ifdef CONFIG_PAX_REFCOUNT
16719 +3:
16720 +#endif
16721 +.endif
16722 +
16723 RESTORE ebx
16724 ret
16725 CFI_ENDPROC
16726 -ENDPROC(atomic64_\func\()_return_cx8)
16727 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16728 .endm
16729
16730 incdec_return inc add adc
16731 incdec_return dec sub sbb
16732 +incdec_return inc add adc _unchecked
16733 +incdec_return dec sub sbb _unchecked
16734
16735 ENTRY(atomic64_dec_if_positive_cx8)
16736 CFI_STARTPROC
16737 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16738 movl %edx, %ecx
16739 subl $1, %ebx
16740 sbb $0, %ecx
16741 +
16742 +#ifdef CONFIG_PAX_REFCOUNT
16743 + into
16744 +1234:
16745 + _ASM_EXTABLE(1234b, 2f)
16746 +#endif
16747 +
16748 js 2f
16749 LOCK_PREFIX
16750 cmpxchg8b (%esi)
16751 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16752 movl %edx, %ecx
16753 addl %esi, %ebx
16754 adcl %edi, %ecx
16755 +
16756 +#ifdef CONFIG_PAX_REFCOUNT
16757 + into
16758 +1234:
16759 + _ASM_EXTABLE(1234b, 3f)
16760 +#endif
16761 +
16762 LOCK_PREFIX
16763 cmpxchg8b (%ebp)
16764 jne 1b
16765 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16766 movl %edx, %ecx
16767 addl $1, %ebx
16768 adcl $0, %ecx
16769 +
16770 +#ifdef CONFIG_PAX_REFCOUNT
16771 + into
16772 +1234:
16773 + _ASM_EXTABLE(1234b, 3f)
16774 +#endif
16775 +
16776 LOCK_PREFIX
16777 cmpxchg8b (%esi)
16778 jne 1b
16779 diff -urNp linux-2.6.39.4/arch/x86/lib/checksum_32.S linux-2.6.39.4/arch/x86/lib/checksum_32.S
16780 --- linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400
16781 +++ linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-08-05 19:44:35.000000000 -0400
16782 @@ -28,7 +28,8 @@
16783 #include <linux/linkage.h>
16784 #include <asm/dwarf2.h>
16785 #include <asm/errno.h>
16786 -
16787 +#include <asm/segment.h>
16788 +
16789 /*
16790 * computes a partial checksum, e.g. for TCP/UDP fragments
16791 */
16792 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16793
16794 #define ARGBASE 16
16795 #define FP 12
16796 -
16797 -ENTRY(csum_partial_copy_generic)
16798 +
16799 +ENTRY(csum_partial_copy_generic_to_user)
16800 CFI_STARTPROC
16801 +
16802 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16803 + pushl_cfi %gs
16804 + popl_cfi %es
16805 + jmp csum_partial_copy_generic
16806 +#endif
16807 +
16808 +ENTRY(csum_partial_copy_generic_from_user)
16809 +
16810 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16811 + pushl_cfi %gs
16812 + popl_cfi %ds
16813 +#endif
16814 +
16815 +ENTRY(csum_partial_copy_generic)
16816 subl $4,%esp
16817 CFI_ADJUST_CFA_OFFSET 4
16818 pushl_cfi %edi
16819 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16820 jmp 4f
16821 SRC(1: movw (%esi), %bx )
16822 addl $2, %esi
16823 -DST( movw %bx, (%edi) )
16824 +DST( movw %bx, %es:(%edi) )
16825 addl $2, %edi
16826 addw %bx, %ax
16827 adcl $0, %eax
16828 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16829 SRC(1: movl (%esi), %ebx )
16830 SRC( movl 4(%esi), %edx )
16831 adcl %ebx, %eax
16832 -DST( movl %ebx, (%edi) )
16833 +DST( movl %ebx, %es:(%edi) )
16834 adcl %edx, %eax
16835 -DST( movl %edx, 4(%edi) )
16836 +DST( movl %edx, %es:4(%edi) )
16837
16838 SRC( movl 8(%esi), %ebx )
16839 SRC( movl 12(%esi), %edx )
16840 adcl %ebx, %eax
16841 -DST( movl %ebx, 8(%edi) )
16842 +DST( movl %ebx, %es:8(%edi) )
16843 adcl %edx, %eax
16844 -DST( movl %edx, 12(%edi) )
16845 +DST( movl %edx, %es:12(%edi) )
16846
16847 SRC( movl 16(%esi), %ebx )
16848 SRC( movl 20(%esi), %edx )
16849 adcl %ebx, %eax
16850 -DST( movl %ebx, 16(%edi) )
16851 +DST( movl %ebx, %es:16(%edi) )
16852 adcl %edx, %eax
16853 -DST( movl %edx, 20(%edi) )
16854 +DST( movl %edx, %es:20(%edi) )
16855
16856 SRC( movl 24(%esi), %ebx )
16857 SRC( movl 28(%esi), %edx )
16858 adcl %ebx, %eax
16859 -DST( movl %ebx, 24(%edi) )
16860 +DST( movl %ebx, %es:24(%edi) )
16861 adcl %edx, %eax
16862 -DST( movl %edx, 28(%edi) )
16863 +DST( movl %edx, %es:28(%edi) )
16864
16865 lea 32(%esi), %esi
16866 lea 32(%edi), %edi
16867 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16868 shrl $2, %edx # This clears CF
16869 SRC(3: movl (%esi), %ebx )
16870 adcl %ebx, %eax
16871 -DST( movl %ebx, (%edi) )
16872 +DST( movl %ebx, %es:(%edi) )
16873 lea 4(%esi), %esi
16874 lea 4(%edi), %edi
16875 dec %edx
16876 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16877 jb 5f
16878 SRC( movw (%esi), %cx )
16879 leal 2(%esi), %esi
16880 -DST( movw %cx, (%edi) )
16881 +DST( movw %cx, %es:(%edi) )
16882 leal 2(%edi), %edi
16883 je 6f
16884 shll $16,%ecx
16885 SRC(5: movb (%esi), %cl )
16886 -DST( movb %cl, (%edi) )
16887 +DST( movb %cl, %es:(%edi) )
16888 6: addl %ecx, %eax
16889 adcl $0, %eax
16890 7:
16891 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16892
16893 6001:
16894 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16895 - movl $-EFAULT, (%ebx)
16896 + movl $-EFAULT, %ss:(%ebx)
16897
16898 # zero the complete destination - computing the rest
16899 # is too much work
16900 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16901
16902 6002:
16903 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16904 - movl $-EFAULT,(%ebx)
16905 + movl $-EFAULT,%ss:(%ebx)
16906 jmp 5000b
16907
16908 .previous
16909
16910 + pushl_cfi %ss
16911 + popl_cfi %ds
16912 + pushl_cfi %ss
16913 + popl_cfi %es
16914 popl_cfi %ebx
16915 CFI_RESTORE ebx
16916 popl_cfi %esi
16917 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16918 popl_cfi %ecx # equivalent to addl $4,%esp
16919 ret
16920 CFI_ENDPROC
16921 -ENDPROC(csum_partial_copy_generic)
16922 +ENDPROC(csum_partial_copy_generic_to_user)
16923
16924 #else
16925
16926 /* Version for PentiumII/PPro */
16927
16928 #define ROUND1(x) \
16929 + nop; nop; nop; \
16930 SRC(movl x(%esi), %ebx ) ; \
16931 addl %ebx, %eax ; \
16932 - DST(movl %ebx, x(%edi) ) ;
16933 + DST(movl %ebx, %es:x(%edi)) ;
16934
16935 #define ROUND(x) \
16936 + nop; nop; nop; \
16937 SRC(movl x(%esi), %ebx ) ; \
16938 adcl %ebx, %eax ; \
16939 - DST(movl %ebx, x(%edi) ) ;
16940 + DST(movl %ebx, %es:x(%edi)) ;
16941
16942 #define ARGBASE 12
16943 -
16944 -ENTRY(csum_partial_copy_generic)
16945 +
16946 +ENTRY(csum_partial_copy_generic_to_user)
16947 CFI_STARTPROC
16948 +
16949 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16950 + pushl_cfi %gs
16951 + popl_cfi %es
16952 + jmp csum_partial_copy_generic
16953 +#endif
16954 +
16955 +ENTRY(csum_partial_copy_generic_from_user)
16956 +
16957 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16958 + pushl_cfi %gs
16959 + popl_cfi %ds
16960 +#endif
16961 +
16962 +ENTRY(csum_partial_copy_generic)
16963 pushl_cfi %ebx
16964 CFI_REL_OFFSET ebx, 0
16965 pushl_cfi %edi
16966 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
16967 subl %ebx, %edi
16968 lea -1(%esi),%edx
16969 andl $-32,%edx
16970 - lea 3f(%ebx,%ebx), %ebx
16971 + lea 3f(%ebx,%ebx,2), %ebx
16972 testl %esi, %esi
16973 jmp *%ebx
16974 1: addl $64,%esi
16975 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
16976 jb 5f
16977 SRC( movw (%esi), %dx )
16978 leal 2(%esi), %esi
16979 -DST( movw %dx, (%edi) )
16980 +DST( movw %dx, %es:(%edi) )
16981 leal 2(%edi), %edi
16982 je 6f
16983 shll $16,%edx
16984 5:
16985 SRC( movb (%esi), %dl )
16986 -DST( movb %dl, (%edi) )
16987 +DST( movb %dl, %es:(%edi) )
16988 6: addl %edx, %eax
16989 adcl $0, %eax
16990 7:
16991 .section .fixup, "ax"
16992 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
16993 - movl $-EFAULT, (%ebx)
16994 + movl $-EFAULT, %ss:(%ebx)
16995 # zero the complete destination (computing the rest is too much work)
16996 movl ARGBASE+8(%esp),%edi # dst
16997 movl ARGBASE+12(%esp),%ecx # len
16998 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
16999 rep; stosb
17000 jmp 7b
17001 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17002 - movl $-EFAULT, (%ebx)
17003 + movl $-EFAULT, %ss:(%ebx)
17004 jmp 7b
17005 .previous
17006
17007 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17008 + pushl_cfi %ss
17009 + popl_cfi %ds
17010 + pushl_cfi %ss
17011 + popl_cfi %es
17012 +#endif
17013 +
17014 popl_cfi %esi
17015 CFI_RESTORE esi
17016 popl_cfi %edi
17017 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17018 CFI_RESTORE ebx
17019 ret
17020 CFI_ENDPROC
17021 -ENDPROC(csum_partial_copy_generic)
17022 +ENDPROC(csum_partial_copy_generic_to_user)
17023
17024 #undef ROUND
17025 #undef ROUND1
17026 diff -urNp linux-2.6.39.4/arch/x86/lib/clear_page_64.S linux-2.6.39.4/arch/x86/lib/clear_page_64.S
17027 --- linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400
17028 +++ linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-08-05 19:44:35.000000000 -0400
17029 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
17030
17031 #include <asm/cpufeature.h>
17032
17033 - .section .altinstr_replacement,"ax"
17034 + .section .altinstr_replacement,"a"
17035 1: .byte 0xeb /* jmp <disp8> */
17036 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17037 2:
17038 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_page_64.S linux-2.6.39.4/arch/x86/lib/copy_page_64.S
17039 --- linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400
17040 +++ linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-08-05 19:44:35.000000000 -0400
17041 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
17042
17043 #include <asm/cpufeature.h>
17044
17045 - .section .altinstr_replacement,"ax"
17046 + .section .altinstr_replacement,"a"
17047 1: .byte 0xeb /* jmp <disp8> */
17048 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17049 2:
17050 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_64.S linux-2.6.39.4/arch/x86/lib/copy_user_64.S
17051 --- linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400
17052 +++ linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-08-05 19:44:35.000000000 -0400
17053 @@ -15,13 +15,14 @@
17054 #include <asm/asm-offsets.h>
17055 #include <asm/thread_info.h>
17056 #include <asm/cpufeature.h>
17057 +#include <asm/pgtable.h>
17058
17059 .macro ALTERNATIVE_JUMP feature,orig,alt
17060 0:
17061 .byte 0xe9 /* 32bit jump */
17062 .long \orig-1f /* by default jump to orig */
17063 1:
17064 - .section .altinstr_replacement,"ax"
17065 + .section .altinstr_replacement,"a"
17066 2: .byte 0xe9 /* near jump with 32bit immediate */
17067 .long \alt-1b /* offset */ /* or alternatively to alt */
17068 .previous
17069 @@ -64,37 +65,13 @@
17070 #endif
17071 .endm
17072
17073 -/* Standard copy_to_user with segment limit checking */
17074 -ENTRY(_copy_to_user)
17075 - CFI_STARTPROC
17076 - GET_THREAD_INFO(%rax)
17077 - movq %rdi,%rcx
17078 - addq %rdx,%rcx
17079 - jc bad_to_user
17080 - cmpq TI_addr_limit(%rax),%rcx
17081 - ja bad_to_user
17082 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17083 - CFI_ENDPROC
17084 -ENDPROC(_copy_to_user)
17085 -
17086 -/* Standard copy_from_user with segment limit checking */
17087 -ENTRY(_copy_from_user)
17088 - CFI_STARTPROC
17089 - GET_THREAD_INFO(%rax)
17090 - movq %rsi,%rcx
17091 - addq %rdx,%rcx
17092 - jc bad_from_user
17093 - cmpq TI_addr_limit(%rax),%rcx
17094 - ja bad_from_user
17095 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17096 - CFI_ENDPROC
17097 -ENDPROC(_copy_from_user)
17098 -
17099 .section .fixup,"ax"
17100 /* must zero dest */
17101 ENTRY(bad_from_user)
17102 bad_from_user:
17103 CFI_STARTPROC
17104 + testl %edx,%edx
17105 + js bad_to_user
17106 movl %edx,%ecx
17107 xorl %eax,%eax
17108 rep
17109 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S
17110 --- linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400
17111 +++ linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-05 19:44:35.000000000 -0400
17112 @@ -14,6 +14,7 @@
17113 #include <asm/current.h>
17114 #include <asm/asm-offsets.h>
17115 #include <asm/thread_info.h>
17116 +#include <asm/pgtable.h>
17117
17118 .macro ALIGN_DESTINATION
17119 #ifdef FIX_ALIGNMENT
17120 @@ -50,6 +51,15 @@
17121 */
17122 ENTRY(__copy_user_nocache)
17123 CFI_STARTPROC
17124 +
17125 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17126 + mov $PAX_USER_SHADOW_BASE,%rcx
17127 + cmp %rcx,%rsi
17128 + jae 1f
17129 + add %rcx,%rsi
17130 +1:
17131 +#endif
17132 +
17133 cmpl $8,%edx
17134 jb 20f /* less then 8 bytes, go to byte copy loop */
17135 ALIGN_DESTINATION
17136 diff -urNp linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c
17137 --- linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400
17138 +++ linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-08-05 19:44:35.000000000 -0400
17139 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17140 len -= 2;
17141 }
17142 }
17143 +
17144 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17145 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17146 + src += PAX_USER_SHADOW_BASE;
17147 +#endif
17148 +
17149 isum = csum_partial_copy_generic((__force const void *)src,
17150 dst, len, isum, errp, NULL);
17151 if (unlikely(*errp))
17152 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17153 }
17154
17155 *errp = 0;
17156 +
17157 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17158 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17159 + dst += PAX_USER_SHADOW_BASE;
17160 +#endif
17161 +
17162 return csum_partial_copy_generic(src, (void __force *)dst,
17163 len, isum, NULL, errp);
17164 }
17165 diff -urNp linux-2.6.39.4/arch/x86/lib/getuser.S linux-2.6.39.4/arch/x86/lib/getuser.S
17166 --- linux-2.6.39.4/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400
17167 +++ linux-2.6.39.4/arch/x86/lib/getuser.S 2011-08-05 19:44:35.000000000 -0400
17168 @@ -33,14 +33,35 @@
17169 #include <asm/asm-offsets.h>
17170 #include <asm/thread_info.h>
17171 #include <asm/asm.h>
17172 +#include <asm/segment.h>
17173 +#include <asm/pgtable.h>
17174 +
17175 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17176 +#define __copyuser_seg gs;
17177 +#else
17178 +#define __copyuser_seg
17179 +#endif
17180
17181 .text
17182 ENTRY(__get_user_1)
17183 CFI_STARTPROC
17184 +
17185 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17186 GET_THREAD_INFO(%_ASM_DX)
17187 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17188 jae bad_get_user
17189 -1: movzb (%_ASM_AX),%edx
17190 +
17191 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17192 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17193 + cmp %_ASM_DX,%_ASM_AX
17194 + jae 1234f
17195 + add %_ASM_DX,%_ASM_AX
17196 +1234:
17197 +#endif
17198 +
17199 +#endif
17200 +
17201 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17202 xor %eax,%eax
17203 ret
17204 CFI_ENDPROC
17205 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17206 ENTRY(__get_user_2)
17207 CFI_STARTPROC
17208 add $1,%_ASM_AX
17209 +
17210 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17211 jc bad_get_user
17212 GET_THREAD_INFO(%_ASM_DX)
17213 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17214 jae bad_get_user
17215 -2: movzwl -1(%_ASM_AX),%edx
17216 +
17217 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17218 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17219 + cmp %_ASM_DX,%_ASM_AX
17220 + jae 1234f
17221 + add %_ASM_DX,%_ASM_AX
17222 +1234:
17223 +#endif
17224 +
17225 +#endif
17226 +
17227 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17228 xor %eax,%eax
17229 ret
17230 CFI_ENDPROC
17231 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17232 ENTRY(__get_user_4)
17233 CFI_STARTPROC
17234 add $3,%_ASM_AX
17235 +
17236 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17237 jc bad_get_user
17238 GET_THREAD_INFO(%_ASM_DX)
17239 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17240 jae bad_get_user
17241 -3: mov -3(%_ASM_AX),%edx
17242 +
17243 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17244 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17245 + cmp %_ASM_DX,%_ASM_AX
17246 + jae 1234f
17247 + add %_ASM_DX,%_ASM_AX
17248 +1234:
17249 +#endif
17250 +
17251 +#endif
17252 +
17253 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17254 xor %eax,%eax
17255 ret
17256 CFI_ENDPROC
17257 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17258 GET_THREAD_INFO(%_ASM_DX)
17259 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17260 jae bad_get_user
17261 +
17262 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17263 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17264 + cmp %_ASM_DX,%_ASM_AX
17265 + jae 1234f
17266 + add %_ASM_DX,%_ASM_AX
17267 +1234:
17268 +#endif
17269 +
17270 4: movq -7(%_ASM_AX),%_ASM_DX
17271 xor %eax,%eax
17272 ret
17273 diff -urNp linux-2.6.39.4/arch/x86/lib/insn.c linux-2.6.39.4/arch/x86/lib/insn.c
17274 --- linux-2.6.39.4/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400
17275 +++ linux-2.6.39.4/arch/x86/lib/insn.c 2011-08-05 19:44:35.000000000 -0400
17276 @@ -21,6 +21,11 @@
17277 #include <linux/string.h>
17278 #include <asm/inat.h>
17279 #include <asm/insn.h>
17280 +#ifdef __KERNEL__
17281 +#include <asm/pgtable_types.h>
17282 +#else
17283 +#define ktla_ktva(addr) addr
17284 +#endif
17285
17286 #define get_next(t, insn) \
17287 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17288 @@ -40,8 +45,8 @@
17289 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17290 {
17291 memset(insn, 0, sizeof(*insn));
17292 - insn->kaddr = kaddr;
17293 - insn->next_byte = kaddr;
17294 + insn->kaddr = ktla_ktva(kaddr);
17295 + insn->next_byte = ktla_ktva(kaddr);
17296 insn->x86_64 = x86_64 ? 1 : 0;
17297 insn->opnd_bytes = 4;
17298 if (x86_64)
17299 diff -urNp linux-2.6.39.4/arch/x86/lib/mmx_32.c linux-2.6.39.4/arch/x86/lib/mmx_32.c
17300 --- linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400
17301 +++ linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-08-05 19:44:35.000000000 -0400
17302 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17303 {
17304 void *p;
17305 int i;
17306 + unsigned long cr0;
17307
17308 if (unlikely(in_interrupt()))
17309 return __memcpy(to, from, len);
17310 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17311 kernel_fpu_begin();
17312
17313 __asm__ __volatile__ (
17314 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17315 - " prefetch 64(%0)\n"
17316 - " prefetch 128(%0)\n"
17317 - " prefetch 192(%0)\n"
17318 - " prefetch 256(%0)\n"
17319 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17320 + " prefetch 64(%1)\n"
17321 + " prefetch 128(%1)\n"
17322 + " prefetch 192(%1)\n"
17323 + " prefetch 256(%1)\n"
17324 "2: \n"
17325 ".section .fixup, \"ax\"\n"
17326 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17327 + "3: \n"
17328 +
17329 +#ifdef CONFIG_PAX_KERNEXEC
17330 + " movl %%cr0, %0\n"
17331 + " movl %0, %%eax\n"
17332 + " andl $0xFFFEFFFF, %%eax\n"
17333 + " movl %%eax, %%cr0\n"
17334 +#endif
17335 +
17336 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17337 +
17338 +#ifdef CONFIG_PAX_KERNEXEC
17339 + " movl %0, %%cr0\n"
17340 +#endif
17341 +
17342 " jmp 2b\n"
17343 ".previous\n"
17344 _ASM_EXTABLE(1b, 3b)
17345 - : : "r" (from));
17346 + : "=&r" (cr0) : "r" (from) : "ax");
17347
17348 for ( ; i > 5; i--) {
17349 __asm__ __volatile__ (
17350 - "1: prefetch 320(%0)\n"
17351 - "2: movq (%0), %%mm0\n"
17352 - " movq 8(%0), %%mm1\n"
17353 - " movq 16(%0), %%mm2\n"
17354 - " movq 24(%0), %%mm3\n"
17355 - " movq %%mm0, (%1)\n"
17356 - " movq %%mm1, 8(%1)\n"
17357 - " movq %%mm2, 16(%1)\n"
17358 - " movq %%mm3, 24(%1)\n"
17359 - " movq 32(%0), %%mm0\n"
17360 - " movq 40(%0), %%mm1\n"
17361 - " movq 48(%0), %%mm2\n"
17362 - " movq 56(%0), %%mm3\n"
17363 - " movq %%mm0, 32(%1)\n"
17364 - " movq %%mm1, 40(%1)\n"
17365 - " movq %%mm2, 48(%1)\n"
17366 - " movq %%mm3, 56(%1)\n"
17367 + "1: prefetch 320(%1)\n"
17368 + "2: movq (%1), %%mm0\n"
17369 + " movq 8(%1), %%mm1\n"
17370 + " movq 16(%1), %%mm2\n"
17371 + " movq 24(%1), %%mm3\n"
17372 + " movq %%mm0, (%2)\n"
17373 + " movq %%mm1, 8(%2)\n"
17374 + " movq %%mm2, 16(%2)\n"
17375 + " movq %%mm3, 24(%2)\n"
17376 + " movq 32(%1), %%mm0\n"
17377 + " movq 40(%1), %%mm1\n"
17378 + " movq 48(%1), %%mm2\n"
17379 + " movq 56(%1), %%mm3\n"
17380 + " movq %%mm0, 32(%2)\n"
17381 + " movq %%mm1, 40(%2)\n"
17382 + " movq %%mm2, 48(%2)\n"
17383 + " movq %%mm3, 56(%2)\n"
17384 ".section .fixup, \"ax\"\n"
17385 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17386 + "3:\n"
17387 +
17388 +#ifdef CONFIG_PAX_KERNEXEC
17389 + " movl %%cr0, %0\n"
17390 + " movl %0, %%eax\n"
17391 + " andl $0xFFFEFFFF, %%eax\n"
17392 + " movl %%eax, %%cr0\n"
17393 +#endif
17394 +
17395 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17396 +
17397 +#ifdef CONFIG_PAX_KERNEXEC
17398 + " movl %0, %%cr0\n"
17399 +#endif
17400 +
17401 " jmp 2b\n"
17402 ".previous\n"
17403 _ASM_EXTABLE(1b, 3b)
17404 - : : "r" (from), "r" (to) : "memory");
17405 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17406
17407 from += 64;
17408 to += 64;
17409 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17410 static void fast_copy_page(void *to, void *from)
17411 {
17412 int i;
17413 + unsigned long cr0;
17414
17415 kernel_fpu_begin();
17416
17417 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17418 * but that is for later. -AV
17419 */
17420 __asm__ __volatile__(
17421 - "1: prefetch (%0)\n"
17422 - " prefetch 64(%0)\n"
17423 - " prefetch 128(%0)\n"
17424 - " prefetch 192(%0)\n"
17425 - " prefetch 256(%0)\n"
17426 + "1: prefetch (%1)\n"
17427 + " prefetch 64(%1)\n"
17428 + " prefetch 128(%1)\n"
17429 + " prefetch 192(%1)\n"
17430 + " prefetch 256(%1)\n"
17431 "2: \n"
17432 ".section .fixup, \"ax\"\n"
17433 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17434 + "3: \n"
17435 +
17436 +#ifdef CONFIG_PAX_KERNEXEC
17437 + " movl %%cr0, %0\n"
17438 + " movl %0, %%eax\n"
17439 + " andl $0xFFFEFFFF, %%eax\n"
17440 + " movl %%eax, %%cr0\n"
17441 +#endif
17442 +
17443 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17444 +
17445 +#ifdef CONFIG_PAX_KERNEXEC
17446 + " movl %0, %%cr0\n"
17447 +#endif
17448 +
17449 " jmp 2b\n"
17450 ".previous\n"
17451 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17452 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17453
17454 for (i = 0; i < (4096-320)/64; i++) {
17455 __asm__ __volatile__ (
17456 - "1: prefetch 320(%0)\n"
17457 - "2: movq (%0), %%mm0\n"
17458 - " movntq %%mm0, (%1)\n"
17459 - " movq 8(%0), %%mm1\n"
17460 - " movntq %%mm1, 8(%1)\n"
17461 - " movq 16(%0), %%mm2\n"
17462 - " movntq %%mm2, 16(%1)\n"
17463 - " movq 24(%0), %%mm3\n"
17464 - " movntq %%mm3, 24(%1)\n"
17465 - " movq 32(%0), %%mm4\n"
17466 - " movntq %%mm4, 32(%1)\n"
17467 - " movq 40(%0), %%mm5\n"
17468 - " movntq %%mm5, 40(%1)\n"
17469 - " movq 48(%0), %%mm6\n"
17470 - " movntq %%mm6, 48(%1)\n"
17471 - " movq 56(%0), %%mm7\n"
17472 - " movntq %%mm7, 56(%1)\n"
17473 + "1: prefetch 320(%1)\n"
17474 + "2: movq (%1), %%mm0\n"
17475 + " movntq %%mm0, (%2)\n"
17476 + " movq 8(%1), %%mm1\n"
17477 + " movntq %%mm1, 8(%2)\n"
17478 + " movq 16(%1), %%mm2\n"
17479 + " movntq %%mm2, 16(%2)\n"
17480 + " movq 24(%1), %%mm3\n"
17481 + " movntq %%mm3, 24(%2)\n"
17482 + " movq 32(%1), %%mm4\n"
17483 + " movntq %%mm4, 32(%2)\n"
17484 + " movq 40(%1), %%mm5\n"
17485 + " movntq %%mm5, 40(%2)\n"
17486 + " movq 48(%1), %%mm6\n"
17487 + " movntq %%mm6, 48(%2)\n"
17488 + " movq 56(%1), %%mm7\n"
17489 + " movntq %%mm7, 56(%2)\n"
17490 ".section .fixup, \"ax\"\n"
17491 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17492 + "3:\n"
17493 +
17494 +#ifdef CONFIG_PAX_KERNEXEC
17495 + " movl %%cr0, %0\n"
17496 + " movl %0, %%eax\n"
17497 + " andl $0xFFFEFFFF, %%eax\n"
17498 + " movl %%eax, %%cr0\n"
17499 +#endif
17500 +
17501 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17502 +
17503 +#ifdef CONFIG_PAX_KERNEXEC
17504 + " movl %0, %%cr0\n"
17505 +#endif
17506 +
17507 " jmp 2b\n"
17508 ".previous\n"
17509 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17510 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17511
17512 from += 64;
17513 to += 64;
17514 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17515 static void fast_copy_page(void *to, void *from)
17516 {
17517 int i;
17518 + unsigned long cr0;
17519
17520 kernel_fpu_begin();
17521
17522 __asm__ __volatile__ (
17523 - "1: prefetch (%0)\n"
17524 - " prefetch 64(%0)\n"
17525 - " prefetch 128(%0)\n"
17526 - " prefetch 192(%0)\n"
17527 - " prefetch 256(%0)\n"
17528 + "1: prefetch (%1)\n"
17529 + " prefetch 64(%1)\n"
17530 + " prefetch 128(%1)\n"
17531 + " prefetch 192(%1)\n"
17532 + " prefetch 256(%1)\n"
17533 "2: \n"
17534 ".section .fixup, \"ax\"\n"
17535 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17536 + "3: \n"
17537 +
17538 +#ifdef CONFIG_PAX_KERNEXEC
17539 + " movl %%cr0, %0\n"
17540 + " movl %0, %%eax\n"
17541 + " andl $0xFFFEFFFF, %%eax\n"
17542 + " movl %%eax, %%cr0\n"
17543 +#endif
17544 +
17545 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17546 +
17547 +#ifdef CONFIG_PAX_KERNEXEC
17548 + " movl %0, %%cr0\n"
17549 +#endif
17550 +
17551 " jmp 2b\n"
17552 ".previous\n"
17553 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17554 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17555
17556 for (i = 0; i < 4096/64; i++) {
17557 __asm__ __volatile__ (
17558 - "1: prefetch 320(%0)\n"
17559 - "2: movq (%0), %%mm0\n"
17560 - " movq 8(%0), %%mm1\n"
17561 - " movq 16(%0), %%mm2\n"
17562 - " movq 24(%0), %%mm3\n"
17563 - " movq %%mm0, (%1)\n"
17564 - " movq %%mm1, 8(%1)\n"
17565 - " movq %%mm2, 16(%1)\n"
17566 - " movq %%mm3, 24(%1)\n"
17567 - " movq 32(%0), %%mm0\n"
17568 - " movq 40(%0), %%mm1\n"
17569 - " movq 48(%0), %%mm2\n"
17570 - " movq 56(%0), %%mm3\n"
17571 - " movq %%mm0, 32(%1)\n"
17572 - " movq %%mm1, 40(%1)\n"
17573 - " movq %%mm2, 48(%1)\n"
17574 - " movq %%mm3, 56(%1)\n"
17575 + "1: prefetch 320(%1)\n"
17576 + "2: movq (%1), %%mm0\n"
17577 + " movq 8(%1), %%mm1\n"
17578 + " movq 16(%1), %%mm2\n"
17579 + " movq 24(%1), %%mm3\n"
17580 + " movq %%mm0, (%2)\n"
17581 + " movq %%mm1, 8(%2)\n"
17582 + " movq %%mm2, 16(%2)\n"
17583 + " movq %%mm3, 24(%2)\n"
17584 + " movq 32(%1), %%mm0\n"
17585 + " movq 40(%1), %%mm1\n"
17586 + " movq 48(%1), %%mm2\n"
17587 + " movq 56(%1), %%mm3\n"
17588 + " movq %%mm0, 32(%2)\n"
17589 + " movq %%mm1, 40(%2)\n"
17590 + " movq %%mm2, 48(%2)\n"
17591 + " movq %%mm3, 56(%2)\n"
17592 ".section .fixup, \"ax\"\n"
17593 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17594 + "3:\n"
17595 +
17596 +#ifdef CONFIG_PAX_KERNEXEC
17597 + " movl %%cr0, %0\n"
17598 + " movl %0, %%eax\n"
17599 + " andl $0xFFFEFFFF, %%eax\n"
17600 + " movl %%eax, %%cr0\n"
17601 +#endif
17602 +
17603 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17604 +
17605 +#ifdef CONFIG_PAX_KERNEXEC
17606 + " movl %0, %%cr0\n"
17607 +#endif
17608 +
17609 " jmp 2b\n"
17610 ".previous\n"
17611 _ASM_EXTABLE(1b, 3b)
17612 - : : "r" (from), "r" (to) : "memory");
17613 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17614
17615 from += 64;
17616 to += 64;
17617 diff -urNp linux-2.6.39.4/arch/x86/lib/putuser.S linux-2.6.39.4/arch/x86/lib/putuser.S
17618 --- linux-2.6.39.4/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400
17619 +++ linux-2.6.39.4/arch/x86/lib/putuser.S 2011-08-05 19:44:35.000000000 -0400
17620 @@ -15,7 +15,8 @@
17621 #include <asm/thread_info.h>
17622 #include <asm/errno.h>
17623 #include <asm/asm.h>
17624 -
17625 +#include <asm/segment.h>
17626 +#include <asm/pgtable.h>
17627
17628 /*
17629 * __put_user_X
17630 @@ -29,52 +30,119 @@
17631 * as they get called from within inline assembly.
17632 */
17633
17634 -#define ENTER CFI_STARTPROC ; \
17635 - GET_THREAD_INFO(%_ASM_BX)
17636 +#define ENTER CFI_STARTPROC
17637 #define EXIT ret ; \
17638 CFI_ENDPROC
17639
17640 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17641 +#define _DEST %_ASM_CX,%_ASM_BX
17642 +#else
17643 +#define _DEST %_ASM_CX
17644 +#endif
17645 +
17646 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17647 +#define __copyuser_seg gs;
17648 +#else
17649 +#define __copyuser_seg
17650 +#endif
17651 +
17652 .text
17653 ENTRY(__put_user_1)
17654 ENTER
17655 +
17656 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17657 + GET_THREAD_INFO(%_ASM_BX)
17658 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17659 jae bad_put_user
17660 -1: movb %al,(%_ASM_CX)
17661 +
17662 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17663 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17664 + cmp %_ASM_BX,%_ASM_CX
17665 + jb 1234f
17666 + xor %ebx,%ebx
17667 +1234:
17668 +#endif
17669 +
17670 +#endif
17671 +
17672 +1: __copyuser_seg movb %al,(_DEST)
17673 xor %eax,%eax
17674 EXIT
17675 ENDPROC(__put_user_1)
17676
17677 ENTRY(__put_user_2)
17678 ENTER
17679 +
17680 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17681 + GET_THREAD_INFO(%_ASM_BX)
17682 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17683 sub $1,%_ASM_BX
17684 cmp %_ASM_BX,%_ASM_CX
17685 jae bad_put_user
17686 -2: movw %ax,(%_ASM_CX)
17687 +
17688 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17689 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17690 + cmp %_ASM_BX,%_ASM_CX
17691 + jb 1234f
17692 + xor %ebx,%ebx
17693 +1234:
17694 +#endif
17695 +
17696 +#endif
17697 +
17698 +2: __copyuser_seg movw %ax,(_DEST)
17699 xor %eax,%eax
17700 EXIT
17701 ENDPROC(__put_user_2)
17702
17703 ENTRY(__put_user_4)
17704 ENTER
17705 +
17706 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17707 + GET_THREAD_INFO(%_ASM_BX)
17708 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17709 sub $3,%_ASM_BX
17710 cmp %_ASM_BX,%_ASM_CX
17711 jae bad_put_user
17712 -3: movl %eax,(%_ASM_CX)
17713 +
17714 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17715 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17716 + cmp %_ASM_BX,%_ASM_CX
17717 + jb 1234f
17718 + xor %ebx,%ebx
17719 +1234:
17720 +#endif
17721 +
17722 +#endif
17723 +
17724 +3: __copyuser_seg movl %eax,(_DEST)
17725 xor %eax,%eax
17726 EXIT
17727 ENDPROC(__put_user_4)
17728
17729 ENTRY(__put_user_8)
17730 ENTER
17731 +
17732 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17733 + GET_THREAD_INFO(%_ASM_BX)
17734 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17735 sub $7,%_ASM_BX
17736 cmp %_ASM_BX,%_ASM_CX
17737 jae bad_put_user
17738 -4: mov %_ASM_AX,(%_ASM_CX)
17739 +
17740 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17741 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17742 + cmp %_ASM_BX,%_ASM_CX
17743 + jb 1234f
17744 + xor %ebx,%ebx
17745 +1234:
17746 +#endif
17747 +
17748 +#endif
17749 +
17750 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17751 #ifdef CONFIG_X86_32
17752 -5: movl %edx,4(%_ASM_CX)
17753 +5: __copyuser_seg movl %edx,4(_DEST)
17754 #endif
17755 xor %eax,%eax
17756 EXIT
17757 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_32.c linux-2.6.39.4/arch/x86/lib/usercopy_32.c
17758 --- linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400
17759 +++ linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-08-05 19:44:35.000000000 -0400
17760 @@ -43,7 +43,7 @@ do { \
17761 __asm__ __volatile__( \
17762 " testl %1,%1\n" \
17763 " jz 2f\n" \
17764 - "0: lodsb\n" \
17765 + "0: "__copyuser_seg"lodsb\n" \
17766 " stosb\n" \
17767 " testb %%al,%%al\n" \
17768 " jz 1f\n" \
17769 @@ -128,10 +128,12 @@ do { \
17770 int __d0; \
17771 might_fault(); \
17772 __asm__ __volatile__( \
17773 + __COPYUSER_SET_ES \
17774 "0: rep; stosl\n" \
17775 " movl %2,%0\n" \
17776 "1: rep; stosb\n" \
17777 "2:\n" \
17778 + __COPYUSER_RESTORE_ES \
17779 ".section .fixup,\"ax\"\n" \
17780 "3: lea 0(%2,%0,4),%0\n" \
17781 " jmp 2b\n" \
17782 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17783 might_fault();
17784
17785 __asm__ __volatile__(
17786 + __COPYUSER_SET_ES
17787 " testl %0, %0\n"
17788 " jz 3f\n"
17789 " andl %0,%%ecx\n"
17790 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17791 " subl %%ecx,%0\n"
17792 " addl %0,%%eax\n"
17793 "1:\n"
17794 + __COPYUSER_RESTORE_ES
17795 ".section .fixup,\"ax\"\n"
17796 "2: xorl %%eax,%%eax\n"
17797 " jmp 1b\n"
17798 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17799
17800 #ifdef CONFIG_X86_INTEL_USERCOPY
17801 static unsigned long
17802 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17803 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17804 {
17805 int d0, d1;
17806 __asm__ __volatile__(
17807 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17808 " .align 2,0x90\n"
17809 "3: movl 0(%4), %%eax\n"
17810 "4: movl 4(%4), %%edx\n"
17811 - "5: movl %%eax, 0(%3)\n"
17812 - "6: movl %%edx, 4(%3)\n"
17813 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17814 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17815 "7: movl 8(%4), %%eax\n"
17816 "8: movl 12(%4),%%edx\n"
17817 - "9: movl %%eax, 8(%3)\n"
17818 - "10: movl %%edx, 12(%3)\n"
17819 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17820 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17821 "11: movl 16(%4), %%eax\n"
17822 "12: movl 20(%4), %%edx\n"
17823 - "13: movl %%eax, 16(%3)\n"
17824 - "14: movl %%edx, 20(%3)\n"
17825 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17826 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17827 "15: movl 24(%4), %%eax\n"
17828 "16: movl 28(%4), %%edx\n"
17829 - "17: movl %%eax, 24(%3)\n"
17830 - "18: movl %%edx, 28(%3)\n"
17831 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17832 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17833 "19: movl 32(%4), %%eax\n"
17834 "20: movl 36(%4), %%edx\n"
17835 - "21: movl %%eax, 32(%3)\n"
17836 - "22: movl %%edx, 36(%3)\n"
17837 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17838 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17839 "23: movl 40(%4), %%eax\n"
17840 "24: movl 44(%4), %%edx\n"
17841 - "25: movl %%eax, 40(%3)\n"
17842 - "26: movl %%edx, 44(%3)\n"
17843 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17844 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17845 "27: movl 48(%4), %%eax\n"
17846 "28: movl 52(%4), %%edx\n"
17847 - "29: movl %%eax, 48(%3)\n"
17848 - "30: movl %%edx, 52(%3)\n"
17849 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17850 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17851 "31: movl 56(%4), %%eax\n"
17852 "32: movl 60(%4), %%edx\n"
17853 - "33: movl %%eax, 56(%3)\n"
17854 - "34: movl %%edx, 60(%3)\n"
17855 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17856 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17857 " addl $-64, %0\n"
17858 " addl $64, %4\n"
17859 " addl $64, %3\n"
17860 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17861 " shrl $2, %0\n"
17862 " andl $3, %%eax\n"
17863 " cld\n"
17864 + __COPYUSER_SET_ES
17865 "99: rep; movsl\n"
17866 "36: movl %%eax, %0\n"
17867 "37: rep; movsb\n"
17868 "100:\n"
17869 + __COPYUSER_RESTORE_ES
17870 + ".section .fixup,\"ax\"\n"
17871 + "101: lea 0(%%eax,%0,4),%0\n"
17872 + " jmp 100b\n"
17873 + ".previous\n"
17874 + ".section __ex_table,\"a\"\n"
17875 + " .align 4\n"
17876 + " .long 1b,100b\n"
17877 + " .long 2b,100b\n"
17878 + " .long 3b,100b\n"
17879 + " .long 4b,100b\n"
17880 + " .long 5b,100b\n"
17881 + " .long 6b,100b\n"
17882 + " .long 7b,100b\n"
17883 + " .long 8b,100b\n"
17884 + " .long 9b,100b\n"
17885 + " .long 10b,100b\n"
17886 + " .long 11b,100b\n"
17887 + " .long 12b,100b\n"
17888 + " .long 13b,100b\n"
17889 + " .long 14b,100b\n"
17890 + " .long 15b,100b\n"
17891 + " .long 16b,100b\n"
17892 + " .long 17b,100b\n"
17893 + " .long 18b,100b\n"
17894 + " .long 19b,100b\n"
17895 + " .long 20b,100b\n"
17896 + " .long 21b,100b\n"
17897 + " .long 22b,100b\n"
17898 + " .long 23b,100b\n"
17899 + " .long 24b,100b\n"
17900 + " .long 25b,100b\n"
17901 + " .long 26b,100b\n"
17902 + " .long 27b,100b\n"
17903 + " .long 28b,100b\n"
17904 + " .long 29b,100b\n"
17905 + " .long 30b,100b\n"
17906 + " .long 31b,100b\n"
17907 + " .long 32b,100b\n"
17908 + " .long 33b,100b\n"
17909 + " .long 34b,100b\n"
17910 + " .long 35b,100b\n"
17911 + " .long 36b,100b\n"
17912 + " .long 37b,100b\n"
17913 + " .long 99b,101b\n"
17914 + ".previous"
17915 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17916 + : "1"(to), "2"(from), "0"(size)
17917 + : "eax", "edx", "memory");
17918 + return size;
17919 +}
17920 +
17921 +static unsigned long
17922 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17923 +{
17924 + int d0, d1;
17925 + __asm__ __volatile__(
17926 + " .align 2,0x90\n"
17927 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17928 + " cmpl $67, %0\n"
17929 + " jbe 3f\n"
17930 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17931 + " .align 2,0x90\n"
17932 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17933 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17934 + "5: movl %%eax, 0(%3)\n"
17935 + "6: movl %%edx, 4(%3)\n"
17936 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17937 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17938 + "9: movl %%eax, 8(%3)\n"
17939 + "10: movl %%edx, 12(%3)\n"
17940 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17941 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17942 + "13: movl %%eax, 16(%3)\n"
17943 + "14: movl %%edx, 20(%3)\n"
17944 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17945 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17946 + "17: movl %%eax, 24(%3)\n"
17947 + "18: movl %%edx, 28(%3)\n"
17948 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17949 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17950 + "21: movl %%eax, 32(%3)\n"
17951 + "22: movl %%edx, 36(%3)\n"
17952 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17953 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17954 + "25: movl %%eax, 40(%3)\n"
17955 + "26: movl %%edx, 44(%3)\n"
17956 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17957 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17958 + "29: movl %%eax, 48(%3)\n"
17959 + "30: movl %%edx, 52(%3)\n"
17960 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17961 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17962 + "33: movl %%eax, 56(%3)\n"
17963 + "34: movl %%edx, 60(%3)\n"
17964 + " addl $-64, %0\n"
17965 + " addl $64, %4\n"
17966 + " addl $64, %3\n"
17967 + " cmpl $63, %0\n"
17968 + " ja 1b\n"
17969 + "35: movl %0, %%eax\n"
17970 + " shrl $2, %0\n"
17971 + " andl $3, %%eax\n"
17972 + " cld\n"
17973 + "99: rep; "__copyuser_seg" movsl\n"
17974 + "36: movl %%eax, %0\n"
17975 + "37: rep; "__copyuser_seg" movsb\n"
17976 + "100:\n"
17977 ".section .fixup,\"ax\"\n"
17978 "101: lea 0(%%eax,%0,4),%0\n"
17979 " jmp 100b\n"
17980 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
17981 int d0, d1;
17982 __asm__ __volatile__(
17983 " .align 2,0x90\n"
17984 - "0: movl 32(%4), %%eax\n"
17985 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
17986 " cmpl $67, %0\n"
17987 " jbe 2f\n"
17988 - "1: movl 64(%4), %%eax\n"
17989 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
17990 " .align 2,0x90\n"
17991 - "2: movl 0(%4), %%eax\n"
17992 - "21: movl 4(%4), %%edx\n"
17993 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
17994 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
17995 " movl %%eax, 0(%3)\n"
17996 " movl %%edx, 4(%3)\n"
17997 - "3: movl 8(%4), %%eax\n"
17998 - "31: movl 12(%4),%%edx\n"
17999 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18000 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18001 " movl %%eax, 8(%3)\n"
18002 " movl %%edx, 12(%3)\n"
18003 - "4: movl 16(%4), %%eax\n"
18004 - "41: movl 20(%4), %%edx\n"
18005 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18006 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18007 " movl %%eax, 16(%3)\n"
18008 " movl %%edx, 20(%3)\n"
18009 - "10: movl 24(%4), %%eax\n"
18010 - "51: movl 28(%4), %%edx\n"
18011 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18012 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18013 " movl %%eax, 24(%3)\n"
18014 " movl %%edx, 28(%3)\n"
18015 - "11: movl 32(%4), %%eax\n"
18016 - "61: movl 36(%4), %%edx\n"
18017 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18018 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18019 " movl %%eax, 32(%3)\n"
18020 " movl %%edx, 36(%3)\n"
18021 - "12: movl 40(%4), %%eax\n"
18022 - "71: movl 44(%4), %%edx\n"
18023 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18024 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18025 " movl %%eax, 40(%3)\n"
18026 " movl %%edx, 44(%3)\n"
18027 - "13: movl 48(%4), %%eax\n"
18028 - "81: movl 52(%4), %%edx\n"
18029 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18030 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18031 " movl %%eax, 48(%3)\n"
18032 " movl %%edx, 52(%3)\n"
18033 - "14: movl 56(%4), %%eax\n"
18034 - "91: movl 60(%4), %%edx\n"
18035 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18036 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18037 " movl %%eax, 56(%3)\n"
18038 " movl %%edx, 60(%3)\n"
18039 " addl $-64, %0\n"
18040 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18041 " shrl $2, %0\n"
18042 " andl $3, %%eax\n"
18043 " cld\n"
18044 - "6: rep; movsl\n"
18045 + "6: rep; "__copyuser_seg" movsl\n"
18046 " movl %%eax,%0\n"
18047 - "7: rep; movsb\n"
18048 + "7: rep; "__copyuser_seg" movsb\n"
18049 "8:\n"
18050 ".section .fixup,\"ax\"\n"
18051 "9: lea 0(%%eax,%0,4),%0\n"
18052 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18053
18054 __asm__ __volatile__(
18055 " .align 2,0x90\n"
18056 - "0: movl 32(%4), %%eax\n"
18057 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18058 " cmpl $67, %0\n"
18059 " jbe 2f\n"
18060 - "1: movl 64(%4), %%eax\n"
18061 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18062 " .align 2,0x90\n"
18063 - "2: movl 0(%4), %%eax\n"
18064 - "21: movl 4(%4), %%edx\n"
18065 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18066 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18067 " movnti %%eax, 0(%3)\n"
18068 " movnti %%edx, 4(%3)\n"
18069 - "3: movl 8(%4), %%eax\n"
18070 - "31: movl 12(%4),%%edx\n"
18071 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18072 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18073 " movnti %%eax, 8(%3)\n"
18074 " movnti %%edx, 12(%3)\n"
18075 - "4: movl 16(%4), %%eax\n"
18076 - "41: movl 20(%4), %%edx\n"
18077 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18078 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18079 " movnti %%eax, 16(%3)\n"
18080 " movnti %%edx, 20(%3)\n"
18081 - "10: movl 24(%4), %%eax\n"
18082 - "51: movl 28(%4), %%edx\n"
18083 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18084 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18085 " movnti %%eax, 24(%3)\n"
18086 " movnti %%edx, 28(%3)\n"
18087 - "11: movl 32(%4), %%eax\n"
18088 - "61: movl 36(%4), %%edx\n"
18089 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18090 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18091 " movnti %%eax, 32(%3)\n"
18092 " movnti %%edx, 36(%3)\n"
18093 - "12: movl 40(%4), %%eax\n"
18094 - "71: movl 44(%4), %%edx\n"
18095 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18096 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18097 " movnti %%eax, 40(%3)\n"
18098 " movnti %%edx, 44(%3)\n"
18099 - "13: movl 48(%4), %%eax\n"
18100 - "81: movl 52(%4), %%edx\n"
18101 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18102 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18103 " movnti %%eax, 48(%3)\n"
18104 " movnti %%edx, 52(%3)\n"
18105 - "14: movl 56(%4), %%eax\n"
18106 - "91: movl 60(%4), %%edx\n"
18107 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18108 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18109 " movnti %%eax, 56(%3)\n"
18110 " movnti %%edx, 60(%3)\n"
18111 " addl $-64, %0\n"
18112 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18113 " shrl $2, %0\n"
18114 " andl $3, %%eax\n"
18115 " cld\n"
18116 - "6: rep; movsl\n"
18117 + "6: rep; "__copyuser_seg" movsl\n"
18118 " movl %%eax,%0\n"
18119 - "7: rep; movsb\n"
18120 + "7: rep; "__copyuser_seg" movsb\n"
18121 "8:\n"
18122 ".section .fixup,\"ax\"\n"
18123 "9: lea 0(%%eax,%0,4),%0\n"
18124 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18125
18126 __asm__ __volatile__(
18127 " .align 2,0x90\n"
18128 - "0: movl 32(%4), %%eax\n"
18129 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18130 " cmpl $67, %0\n"
18131 " jbe 2f\n"
18132 - "1: movl 64(%4), %%eax\n"
18133 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18134 " .align 2,0x90\n"
18135 - "2: movl 0(%4), %%eax\n"
18136 - "21: movl 4(%4), %%edx\n"
18137 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18138 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18139 " movnti %%eax, 0(%3)\n"
18140 " movnti %%edx, 4(%3)\n"
18141 - "3: movl 8(%4), %%eax\n"
18142 - "31: movl 12(%4),%%edx\n"
18143 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18144 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18145 " movnti %%eax, 8(%3)\n"
18146 " movnti %%edx, 12(%3)\n"
18147 - "4: movl 16(%4), %%eax\n"
18148 - "41: movl 20(%4), %%edx\n"
18149 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18150 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18151 " movnti %%eax, 16(%3)\n"
18152 " movnti %%edx, 20(%3)\n"
18153 - "10: movl 24(%4), %%eax\n"
18154 - "51: movl 28(%4), %%edx\n"
18155 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18156 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18157 " movnti %%eax, 24(%3)\n"
18158 " movnti %%edx, 28(%3)\n"
18159 - "11: movl 32(%4), %%eax\n"
18160 - "61: movl 36(%4), %%edx\n"
18161 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18162 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18163 " movnti %%eax, 32(%3)\n"
18164 " movnti %%edx, 36(%3)\n"
18165 - "12: movl 40(%4), %%eax\n"
18166 - "71: movl 44(%4), %%edx\n"
18167 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18168 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18169 " movnti %%eax, 40(%3)\n"
18170 " movnti %%edx, 44(%3)\n"
18171 - "13: movl 48(%4), %%eax\n"
18172 - "81: movl 52(%4), %%edx\n"
18173 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18174 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18175 " movnti %%eax, 48(%3)\n"
18176 " movnti %%edx, 52(%3)\n"
18177 - "14: movl 56(%4), %%eax\n"
18178 - "91: movl 60(%4), %%edx\n"
18179 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18180 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18181 " movnti %%eax, 56(%3)\n"
18182 " movnti %%edx, 60(%3)\n"
18183 " addl $-64, %0\n"
18184 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18185 " shrl $2, %0\n"
18186 " andl $3, %%eax\n"
18187 " cld\n"
18188 - "6: rep; movsl\n"
18189 + "6: rep; "__copyuser_seg" movsl\n"
18190 " movl %%eax,%0\n"
18191 - "7: rep; movsb\n"
18192 + "7: rep; "__copyuser_seg" movsb\n"
18193 "8:\n"
18194 ".section .fixup,\"ax\"\n"
18195 "9: lea 0(%%eax,%0,4),%0\n"
18196 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18197 */
18198 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18199 unsigned long size);
18200 -unsigned long __copy_user_intel(void __user *to, const void *from,
18201 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18202 + unsigned long size);
18203 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18204 unsigned long size);
18205 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18206 const void __user *from, unsigned long size);
18207 #endif /* CONFIG_X86_INTEL_USERCOPY */
18208
18209 /* Generic arbitrary sized copy. */
18210 -#define __copy_user(to, from, size) \
18211 +#define __copy_user(to, from, size, prefix, set, restore) \
18212 do { \
18213 int __d0, __d1, __d2; \
18214 __asm__ __volatile__( \
18215 + set \
18216 " cmp $7,%0\n" \
18217 " jbe 1f\n" \
18218 " movl %1,%0\n" \
18219 " negl %0\n" \
18220 " andl $7,%0\n" \
18221 " subl %0,%3\n" \
18222 - "4: rep; movsb\n" \
18223 + "4: rep; "prefix"movsb\n" \
18224 " movl %3,%0\n" \
18225 " shrl $2,%0\n" \
18226 " andl $3,%3\n" \
18227 " .align 2,0x90\n" \
18228 - "0: rep; movsl\n" \
18229 + "0: rep; "prefix"movsl\n" \
18230 " movl %3,%0\n" \
18231 - "1: rep; movsb\n" \
18232 + "1: rep; "prefix"movsb\n" \
18233 "2:\n" \
18234 + restore \
18235 ".section .fixup,\"ax\"\n" \
18236 "5: addl %3,%0\n" \
18237 " jmp 2b\n" \
18238 @@ -682,14 +799,14 @@ do { \
18239 " negl %0\n" \
18240 " andl $7,%0\n" \
18241 " subl %0,%3\n" \
18242 - "4: rep; movsb\n" \
18243 + "4: rep; "__copyuser_seg"movsb\n" \
18244 " movl %3,%0\n" \
18245 " shrl $2,%0\n" \
18246 " andl $3,%3\n" \
18247 " .align 2,0x90\n" \
18248 - "0: rep; movsl\n" \
18249 + "0: rep; "__copyuser_seg"movsl\n" \
18250 " movl %3,%0\n" \
18251 - "1: rep; movsb\n" \
18252 + "1: rep; "__copyuser_seg"movsb\n" \
18253 "2:\n" \
18254 ".section .fixup,\"ax\"\n" \
18255 "5: addl %3,%0\n" \
18256 @@ -775,9 +892,9 @@ survive:
18257 }
18258 #endif
18259 if (movsl_is_ok(to, from, n))
18260 - __copy_user(to, from, n);
18261 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18262 else
18263 - n = __copy_user_intel(to, from, n);
18264 + n = __generic_copy_to_user_intel(to, from, n);
18265 return n;
18266 }
18267 EXPORT_SYMBOL(__copy_to_user_ll);
18268 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18269 unsigned long n)
18270 {
18271 if (movsl_is_ok(to, from, n))
18272 - __copy_user(to, from, n);
18273 + __copy_user(to, from, n, __copyuser_seg, "", "");
18274 else
18275 - n = __copy_user_intel((void __user *)to,
18276 - (const void *)from, n);
18277 + n = __generic_copy_from_user_intel(to, from, n);
18278 return n;
18279 }
18280 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18281 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18282 if (n > 64 && cpu_has_xmm2)
18283 n = __copy_user_intel_nocache(to, from, n);
18284 else
18285 - __copy_user(to, from, n);
18286 + __copy_user(to, from, n, __copyuser_seg, "", "");
18287 #else
18288 - __copy_user(to, from, n);
18289 + __copy_user(to, from, n, __copyuser_seg, "", "");
18290 #endif
18291 return n;
18292 }
18293 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18294
18295 -/**
18296 - * copy_to_user: - Copy a block of data into user space.
18297 - * @to: Destination address, in user space.
18298 - * @from: Source address, in kernel space.
18299 - * @n: Number of bytes to copy.
18300 - *
18301 - * Context: User context only. This function may sleep.
18302 - *
18303 - * Copy data from kernel space to user space.
18304 - *
18305 - * Returns number of bytes that could not be copied.
18306 - * On success, this will be zero.
18307 - */
18308 -unsigned long
18309 -copy_to_user(void __user *to, const void *from, unsigned long n)
18310 +void copy_from_user_overflow(void)
18311 {
18312 - if (access_ok(VERIFY_WRITE, to, n))
18313 - n = __copy_to_user(to, from, n);
18314 - return n;
18315 + WARN(1, "Buffer overflow detected!\n");
18316 }
18317 -EXPORT_SYMBOL(copy_to_user);
18318 +EXPORT_SYMBOL(copy_from_user_overflow);
18319
18320 -/**
18321 - * copy_from_user: - Copy a block of data from user space.
18322 - * @to: Destination address, in kernel space.
18323 - * @from: Source address, in user space.
18324 - * @n: Number of bytes to copy.
18325 - *
18326 - * Context: User context only. This function may sleep.
18327 - *
18328 - * Copy data from user space to kernel space.
18329 - *
18330 - * Returns number of bytes that could not be copied.
18331 - * On success, this will be zero.
18332 - *
18333 - * If some data could not be copied, this function will pad the copied
18334 - * data to the requested size using zero bytes.
18335 - */
18336 -unsigned long
18337 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18338 +void copy_to_user_overflow(void)
18339 {
18340 - if (access_ok(VERIFY_READ, from, n))
18341 - n = __copy_from_user(to, from, n);
18342 - else
18343 - memset(to, 0, n);
18344 - return n;
18345 + WARN(1, "Buffer overflow detected!\n");
18346 }
18347 -EXPORT_SYMBOL(_copy_from_user);
18348 +EXPORT_SYMBOL(copy_to_user_overflow);
18349
18350 -void copy_from_user_overflow(void)
18351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18352 +void __set_fs(mm_segment_t x)
18353 {
18354 - WARN(1, "Buffer overflow detected!\n");
18355 + switch (x.seg) {
18356 + case 0:
18357 + loadsegment(gs, 0);
18358 + break;
18359 + case TASK_SIZE_MAX:
18360 + loadsegment(gs, __USER_DS);
18361 + break;
18362 + case -1UL:
18363 + loadsegment(gs, __KERNEL_DS);
18364 + break;
18365 + default:
18366 + BUG();
18367 + }
18368 + return;
18369 }
18370 -EXPORT_SYMBOL(copy_from_user_overflow);
18371 +EXPORT_SYMBOL(__set_fs);
18372 +
18373 +void set_fs(mm_segment_t x)
18374 +{
18375 + current_thread_info()->addr_limit = x;
18376 + __set_fs(x);
18377 +}
18378 +EXPORT_SYMBOL(set_fs);
18379 +#endif
18380 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_64.c linux-2.6.39.4/arch/x86/lib/usercopy_64.c
18381 --- linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
18382 +++ linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-08-05 19:44:35.000000000 -0400
18383 @@ -42,6 +42,12 @@ long
18384 __strncpy_from_user(char *dst, const char __user *src, long count)
18385 {
18386 long res;
18387 +
18388 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18389 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18390 + src += PAX_USER_SHADOW_BASE;
18391 +#endif
18392 +
18393 __do_strncpy_from_user(dst, src, count, res);
18394 return res;
18395 }
18396 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18397 {
18398 long __d0;
18399 might_fault();
18400 +
18401 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18402 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18403 + addr += PAX_USER_SHADOW_BASE;
18404 +#endif
18405 +
18406 /* no memory constraint because it doesn't change any memory gcc knows
18407 about */
18408 asm volatile(
18409 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18410
18411 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18412 {
18413 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18414 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18415 +
18416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18417 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18418 + to += PAX_USER_SHADOW_BASE;
18419 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18420 + from += PAX_USER_SHADOW_BASE;
18421 +#endif
18422 +
18423 return copy_user_generic((__force void *)to, (__force void *)from, len);
18424 - }
18425 - return len;
18426 + }
18427 + return len;
18428 }
18429 EXPORT_SYMBOL(copy_in_user);
18430
18431 diff -urNp linux-2.6.39.4/arch/x86/Makefile linux-2.6.39.4/arch/x86/Makefile
18432 --- linux-2.6.39.4/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400
18433 +++ linux-2.6.39.4/arch/x86/Makefile 2011-08-05 19:44:35.000000000 -0400
18434 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18435 else
18436 BITS := 64
18437 UTS_MACHINE := x86_64
18438 + biarch := $(call cc-option,-m64)
18439 CHECKFLAGS += -D__x86_64__ -m64
18440
18441 KBUILD_AFLAGS += -m64
18442 @@ -195,3 +196,12 @@ define archhelp
18443 echo ' FDARGS="..." arguments for the booted kernel'
18444 echo ' FDINITRD=file initrd for the booted kernel'
18445 endef
18446 +
18447 +define OLD_LD
18448 +
18449 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18450 +*** Please upgrade your binutils to 2.18 or newer
18451 +endef
18452 +
18453 +archprepare:
18454 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18455 diff -urNp linux-2.6.39.4/arch/x86/mm/extable.c linux-2.6.39.4/arch/x86/mm/extable.c
18456 --- linux-2.6.39.4/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400
18457 +++ linux-2.6.39.4/arch/x86/mm/extable.c 2011-08-05 19:44:35.000000000 -0400
18458 @@ -1,14 +1,71 @@
18459 #include <linux/module.h>
18460 #include <linux/spinlock.h>
18461 +#include <linux/sort.h>
18462 #include <asm/uaccess.h>
18463 +#include <asm/pgtable.h>
18464
18465 +/*
18466 + * The exception table needs to be sorted so that the binary
18467 + * search that we use to find entries in it works properly.
18468 + * This is used both for the kernel exception table and for
18469 + * the exception tables of modules that get loaded.
18470 + */
18471 +static int cmp_ex(const void *a, const void *b)
18472 +{
18473 + const struct exception_table_entry *x = a, *y = b;
18474 +
18475 + /* avoid overflow */
18476 + if (x->insn > y->insn)
18477 + return 1;
18478 + if (x->insn < y->insn)
18479 + return -1;
18480 + return 0;
18481 +}
18482 +
18483 +static void swap_ex(void *a, void *b, int size)
18484 +{
18485 + struct exception_table_entry t, *x = a, *y = b;
18486 +
18487 + t = *x;
18488 +
18489 + pax_open_kernel();
18490 + *x = *y;
18491 + *y = t;
18492 + pax_close_kernel();
18493 +}
18494 +
18495 +void sort_extable(struct exception_table_entry *start,
18496 + struct exception_table_entry *finish)
18497 +{
18498 + sort(start, finish - start, sizeof(struct exception_table_entry),
18499 + cmp_ex, swap_ex);
18500 +}
18501 +
18502 +#ifdef CONFIG_MODULES
18503 +/*
18504 + * If the exception table is sorted, any referring to the module init
18505 + * will be at the beginning or the end.
18506 + */
18507 +void trim_init_extable(struct module *m)
18508 +{
18509 + /*trim the beginning*/
18510 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
18511 + m->extable++;
18512 + m->num_exentries--;
18513 + }
18514 + /*trim the end*/
18515 + while (m->num_exentries &&
18516 + within_module_init(m->extable[m->num_exentries-1].insn, m))
18517 + m->num_exentries--;
18518 +}
18519 +#endif /* CONFIG_MODULES */
18520
18521 int fixup_exception(struct pt_regs *regs)
18522 {
18523 const struct exception_table_entry *fixup;
18524
18525 #ifdef CONFIG_PNPBIOS
18526 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18527 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18528 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18529 extern u32 pnp_bios_is_utter_crap;
18530 pnp_bios_is_utter_crap = 1;
18531 diff -urNp linux-2.6.39.4/arch/x86/mm/fault.c linux-2.6.39.4/arch/x86/mm/fault.c
18532 --- linux-2.6.39.4/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
18533 +++ linux-2.6.39.4/arch/x86/mm/fault.c 2011-08-05 19:44:35.000000000 -0400
18534 @@ -12,10 +12,18 @@
18535 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
18536 #include <linux/perf_event.h> /* perf_sw_event */
18537 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18538 +#include <linux/unistd.h>
18539 +#include <linux/compiler.h>
18540
18541 #include <asm/traps.h> /* dotraplinkage, ... */
18542 #include <asm/pgalloc.h> /* pgd_*(), ... */
18543 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18544 +#include <asm/vsyscall.h>
18545 +#include <asm/tlbflush.h>
18546 +
18547 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18548 +#include <asm/stacktrace.h>
18549 +#endif
18550
18551 /*
18552 * Page fault error code bits:
18553 @@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
18554 int ret = 0;
18555
18556 /* kprobe_running() needs smp_processor_id() */
18557 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18558 + if (kprobes_built_in() && !user_mode(regs)) {
18559 preempt_disable();
18560 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18561 ret = 1;
18562 @@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
18563 return !instr_lo || (instr_lo>>1) == 1;
18564 case 0x00:
18565 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18566 - if (probe_kernel_address(instr, opcode))
18567 + if (user_mode(regs)) {
18568 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18569 + return 0;
18570 + } else if (probe_kernel_address(instr, opcode))
18571 return 0;
18572
18573 *prefetch = (instr_lo == 0xF) &&
18574 @@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
18575 while (instr < max_instr) {
18576 unsigned char opcode;
18577
18578 - if (probe_kernel_address(instr, opcode))
18579 + if (user_mode(regs)) {
18580 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18581 + break;
18582 + } else if (probe_kernel_address(instr, opcode))
18583 break;
18584
18585 instr++;
18586 @@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
18587 force_sig_info(si_signo, &info, tsk);
18588 }
18589
18590 +#ifdef CONFIG_PAX_EMUTRAMP
18591 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18592 +#endif
18593 +
18594 +#ifdef CONFIG_PAX_PAGEEXEC
18595 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18596 +{
18597 + pgd_t *pgd;
18598 + pud_t *pud;
18599 + pmd_t *pmd;
18600 +
18601 + pgd = pgd_offset(mm, address);
18602 + if (!pgd_present(*pgd))
18603 + return NULL;
18604 + pud = pud_offset(pgd, address);
18605 + if (!pud_present(*pud))
18606 + return NULL;
18607 + pmd = pmd_offset(pud, address);
18608 + if (!pmd_present(*pmd))
18609 + return NULL;
18610 + return pmd;
18611 +}
18612 +#endif
18613 +
18614 DEFINE_SPINLOCK(pgd_lock);
18615 LIST_HEAD(pgd_list);
18616
18617 @@ -229,10 +267,22 @@ void vmalloc_sync_all(void)
18618 for (address = VMALLOC_START & PMD_MASK;
18619 address >= TASK_SIZE && address < FIXADDR_TOP;
18620 address += PMD_SIZE) {
18621 +
18622 +#ifdef CONFIG_PAX_PER_CPU_PGD
18623 + unsigned long cpu;
18624 +#else
18625 struct page *page;
18626 +#endif
18627
18628 spin_lock(&pgd_lock);
18629 +
18630 +#ifdef CONFIG_PAX_PER_CPU_PGD
18631 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18632 + pgd_t *pgd = get_cpu_pgd(cpu);
18633 + pmd_t *ret;
18634 +#else
18635 list_for_each_entry(page, &pgd_list, lru) {
18636 + pgd_t *pgd = page_address(page);
18637 spinlock_t *pgt_lock;
18638 pmd_t *ret;
18639
18640 @@ -240,8 +290,13 @@ void vmalloc_sync_all(void)
18641 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18642
18643 spin_lock(pgt_lock);
18644 - ret = vmalloc_sync_one(page_address(page), address);
18645 +#endif
18646 +
18647 + ret = vmalloc_sync_one(pgd, address);
18648 +
18649 +#ifndef CONFIG_PAX_PER_CPU_PGD
18650 spin_unlock(pgt_lock);
18651 +#endif
18652
18653 if (!ret)
18654 break;
18655 @@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa
18656 * an interrupt in the middle of a task switch..
18657 */
18658 pgd_paddr = read_cr3();
18659 +
18660 +#ifdef CONFIG_PAX_PER_CPU_PGD
18661 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18662 +#endif
18663 +
18664 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18665 if (!pmd_k)
18666 return -1;
18667 @@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa
18668 * happen within a race in page table update. In the later
18669 * case just flush:
18670 */
18671 +
18672 +#ifdef CONFIG_PAX_PER_CPU_PGD
18673 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18674 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18675 +#else
18676 pgd = pgd_offset(current->active_mm, address);
18677 +#endif
18678 +
18679 pgd_ref = pgd_offset_k(address);
18680 if (pgd_none(*pgd_ref))
18681 return -1;
18682 @@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r
18683 static int is_errata100(struct pt_regs *regs, unsigned long address)
18684 {
18685 #ifdef CONFIG_X86_64
18686 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18687 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18688 return 1;
18689 #endif
18690 return 0;
18691 @@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r
18692 }
18693
18694 static const char nx_warning[] = KERN_CRIT
18695 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18696 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18697
18698 static void
18699 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18700 @@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un
18701 if (!oops_may_print())
18702 return;
18703
18704 - if (error_code & PF_INSTR) {
18705 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18706 unsigned int level;
18707
18708 pte_t *pte = lookup_address(address, &level);
18709
18710 if (pte && pte_present(*pte) && !pte_exec(*pte))
18711 - printk(nx_warning, current_uid());
18712 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18713 }
18714
18715 +#ifdef CONFIG_PAX_KERNEXEC
18716 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18717 + if (current->signal->curr_ip)
18718 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18719 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18720 + else
18721 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18722 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18723 + }
18724 +#endif
18725 +
18726 printk(KERN_ALERT "BUG: unable to handle kernel ");
18727 if (address < PAGE_SIZE)
18728 printk(KERN_CONT "NULL pointer dereference");
18729 @@ -701,6 +779,68 @@ __bad_area_nosemaphore(struct pt_regs *r
18730 unsigned long address, int si_code)
18731 {
18732 struct task_struct *tsk = current;
18733 + struct mm_struct *mm = tsk->mm;
18734 +
18735 +#ifdef CONFIG_X86_64
18736 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18737 + if (regs->ip == (unsigned long)vgettimeofday) {
18738 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
18739 + return;
18740 + } else if (regs->ip == (unsigned long)vtime) {
18741 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
18742 + return;
18743 + } else if (regs->ip == (unsigned long)vgetcpu) {
18744 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
18745 + return;
18746 + }
18747 + }
18748 +#endif
18749 +
18750 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18751 + if (mm && (error_code & PF_USER)) {
18752 + unsigned long ip = regs->ip;
18753 +
18754 + if (v8086_mode(regs))
18755 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18756 +
18757 + /*
18758 + * It's possible to have interrupts off here:
18759 + */
18760 + local_irq_enable();
18761 +
18762 +#ifdef CONFIG_PAX_PAGEEXEC
18763 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18764 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18765 +
18766 +#ifdef CONFIG_PAX_EMUTRAMP
18767 + switch (pax_handle_fetch_fault(regs)) {
18768 + case 2:
18769 + return;
18770 + }
18771 +#endif
18772 +
18773 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18774 + do_group_exit(SIGKILL);
18775 + }
18776 +#endif
18777 +
18778 +#ifdef CONFIG_PAX_SEGMEXEC
18779 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18780 +
18781 +#ifdef CONFIG_PAX_EMUTRAMP
18782 + switch (pax_handle_fetch_fault(regs)) {
18783 + case 2:
18784 + return;
18785 + }
18786 +#endif
18787 +
18788 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18789 + do_group_exit(SIGKILL);
18790 + }
18791 +#endif
18792 +
18793 + }
18794 +#endif
18795
18796 /* User mode accesses just cause a SIGSEGV */
18797 if (error_code & PF_USER) {
18798 @@ -855,6 +995,99 @@ static int spurious_fault_check(unsigned
18799 return 1;
18800 }
18801
18802 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18803 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18804 +{
18805 + pte_t *pte;
18806 + pmd_t *pmd;
18807 + spinlock_t *ptl;
18808 + unsigned char pte_mask;
18809 +
18810 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18811 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18812 + return 0;
18813 +
18814 + /* PaX: it's our fault, let's handle it if we can */
18815 +
18816 + /* PaX: take a look at read faults before acquiring any locks */
18817 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18818 + /* instruction fetch attempt from a protected page in user mode */
18819 + up_read(&mm->mmap_sem);
18820 +
18821 +#ifdef CONFIG_PAX_EMUTRAMP
18822 + switch (pax_handle_fetch_fault(regs)) {
18823 + case 2:
18824 + return 1;
18825 + }
18826 +#endif
18827 +
18828 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18829 + do_group_exit(SIGKILL);
18830 + }
18831 +
18832 + pmd = pax_get_pmd(mm, address);
18833 + if (unlikely(!pmd))
18834 + return 0;
18835 +
18836 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18837 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18838 + pte_unmap_unlock(pte, ptl);
18839 + return 0;
18840 + }
18841 +
18842 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18843 + /* write attempt to a protected page in user mode */
18844 + pte_unmap_unlock(pte, ptl);
18845 + return 0;
18846 + }
18847 +
18848 +#ifdef CONFIG_SMP
18849 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18850 +#else
18851 + if (likely(address > get_limit(regs->cs)))
18852 +#endif
18853 + {
18854 + set_pte(pte, pte_mkread(*pte));
18855 + __flush_tlb_one(address);
18856 + pte_unmap_unlock(pte, ptl);
18857 + up_read(&mm->mmap_sem);
18858 + return 1;
18859 + }
18860 +
18861 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18862 +
18863 + /*
18864 + * PaX: fill DTLB with user rights and retry
18865 + */
18866 + __asm__ __volatile__ (
18867 + "orb %2,(%1)\n"
18868 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18869 +/*
18870 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18871 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18872 + * page fault when examined during a TLB load attempt. this is true not only
18873 + * for PTEs holding a non-present entry but also present entries that will
18874 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18875 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18876 + * for our target pages since their PTEs are simply not in the TLBs at all.
18877 +
18878 + * the best thing in omitting it is that we gain around 15-20% speed in the
18879 + * fast path of the page fault handler and can get rid of tracing since we
18880 + * can no longer flush unintended entries.
18881 + */
18882 + "invlpg (%0)\n"
18883 +#endif
18884 + __copyuser_seg"testb $0,(%0)\n"
18885 + "xorb %3,(%1)\n"
18886 + :
18887 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18888 + : "memory", "cc");
18889 + pte_unmap_unlock(pte, ptl);
18890 + up_read(&mm->mmap_sem);
18891 + return 1;
18892 +}
18893 +#endif
18894 +
18895 /*
18896 * Handle a spurious fault caused by a stale TLB entry.
18897 *
18898 @@ -927,6 +1160,9 @@ int show_unhandled_signals = 1;
18899 static inline int
18900 access_error(unsigned long error_code, struct vm_area_struct *vma)
18901 {
18902 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18903 + return 1;
18904 +
18905 if (error_code & PF_WRITE) {
18906 /* write, present and write, not present: */
18907 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18908 @@ -960,19 +1196,33 @@ do_page_fault(struct pt_regs *regs, unsi
18909 {
18910 struct vm_area_struct *vma;
18911 struct task_struct *tsk;
18912 - unsigned long address;
18913 struct mm_struct *mm;
18914 int fault;
18915 int write = error_code & PF_WRITE;
18916 unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
18917 (write ? FAULT_FLAG_WRITE : 0);
18918
18919 + /* Get the faulting address: */
18920 + unsigned long address = read_cr2();
18921 +
18922 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18923 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18924 + if (!search_exception_tables(regs->ip)) {
18925 + bad_area_nosemaphore(regs, error_code, address);
18926 + return;
18927 + }
18928 + if (address < PAX_USER_SHADOW_BASE) {
18929 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18930 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18931 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18932 + } else
18933 + address -= PAX_USER_SHADOW_BASE;
18934 + }
18935 +#endif
18936 +
18937 tsk = current;
18938 mm = tsk->mm;
18939
18940 - /* Get the faulting address: */
18941 - address = read_cr2();
18942 -
18943 /*
18944 * Detect and handle instructions that would cause a page fault for
18945 * both a tracked kernel page and a userspace page.
18946 @@ -1032,7 +1282,7 @@ do_page_fault(struct pt_regs *regs, unsi
18947 * User-mode registers count as a user access even for any
18948 * potential system fault or CPU buglet:
18949 */
18950 - if (user_mode_vm(regs)) {
18951 + if (user_mode(regs)) {
18952 local_irq_enable();
18953 error_code |= PF_USER;
18954 } else {
18955 @@ -1087,6 +1337,11 @@ retry:
18956 might_sleep();
18957 }
18958
18959 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18960 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
18961 + return;
18962 +#endif
18963 +
18964 vma = find_vma(mm, address);
18965 if (unlikely(!vma)) {
18966 bad_area(regs, error_code, address);
18967 @@ -1098,18 +1353,24 @@ retry:
18968 bad_area(regs, error_code, address);
18969 return;
18970 }
18971 - if (error_code & PF_USER) {
18972 - /*
18973 - * Accessing the stack below %sp is always a bug.
18974 - * The large cushion allows instructions like enter
18975 - * and pusha to work. ("enter $65535, $31" pushes
18976 - * 32 pointers and then decrements %sp by 65535.)
18977 - */
18978 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
18979 - bad_area(regs, error_code, address);
18980 - return;
18981 - }
18982 + /*
18983 + * Accessing the stack below %sp is always a bug.
18984 + * The large cushion allows instructions like enter
18985 + * and pusha to work. ("enter $65535, $31" pushes
18986 + * 32 pointers and then decrements %sp by 65535.)
18987 + */
18988 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
18989 + bad_area(regs, error_code, address);
18990 + return;
18991 }
18992 +
18993 +#ifdef CONFIG_PAX_SEGMEXEC
18994 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
18995 + bad_area(regs, error_code, address);
18996 + return;
18997 + }
18998 +#endif
18999 +
19000 if (unlikely(expand_stack(vma, address))) {
19001 bad_area(regs, error_code, address);
19002 return;
19003 @@ -1164,3 +1425,199 @@ good_area:
19004
19005 up_read(&mm->mmap_sem);
19006 }
19007 +
19008 +#ifdef CONFIG_PAX_EMUTRAMP
19009 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19010 +{
19011 + int err;
19012 +
19013 + do { /* PaX: gcc trampoline emulation #1 */
19014 + unsigned char mov1, mov2;
19015 + unsigned short jmp;
19016 + unsigned int addr1, addr2;
19017 +
19018 +#ifdef CONFIG_X86_64
19019 + if ((regs->ip + 11) >> 32)
19020 + break;
19021 +#endif
19022 +
19023 + err = get_user(mov1, (unsigned char __user *)regs->ip);
19024 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19025 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19026 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19027 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19028 +
19029 + if (err)
19030 + break;
19031 +
19032 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19033 + regs->cx = addr1;
19034 + regs->ax = addr2;
19035 + regs->ip = addr2;
19036 + return 2;
19037 + }
19038 + } while (0);
19039 +
19040 + do { /* PaX: gcc trampoline emulation #2 */
19041 + unsigned char mov, jmp;
19042 + unsigned int addr1, addr2;
19043 +
19044 +#ifdef CONFIG_X86_64
19045 + if ((regs->ip + 9) >> 32)
19046 + break;
19047 +#endif
19048 +
19049 + err = get_user(mov, (unsigned char __user *)regs->ip);
19050 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19051 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19052 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19053 +
19054 + if (err)
19055 + break;
19056 +
19057 + if (mov == 0xB9 && jmp == 0xE9) {
19058 + regs->cx = addr1;
19059 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19060 + return 2;
19061 + }
19062 + } while (0);
19063 +
19064 + return 1; /* PaX in action */
19065 +}
19066 +
19067 +#ifdef CONFIG_X86_64
19068 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19069 +{
19070 + int err;
19071 +
19072 + do { /* PaX: gcc trampoline emulation #1 */
19073 + unsigned short mov1, mov2, jmp1;
19074 + unsigned char jmp2;
19075 + unsigned int addr1;
19076 + unsigned long addr2;
19077 +
19078 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19079 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19080 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19081 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19082 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19083 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19084 +
19085 + if (err)
19086 + break;
19087 +
19088 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19089 + regs->r11 = addr1;
19090 + regs->r10 = addr2;
19091 + regs->ip = addr1;
19092 + return 2;
19093 + }
19094 + } while (0);
19095 +
19096 + do { /* PaX: gcc trampoline emulation #2 */
19097 + unsigned short mov1, mov2, jmp1;
19098 + unsigned char jmp2;
19099 + unsigned long addr1, addr2;
19100 +
19101 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19102 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19103 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19104 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19105 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19106 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19107 +
19108 + if (err)
19109 + break;
19110 +
19111 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19112 + regs->r11 = addr1;
19113 + regs->r10 = addr2;
19114 + regs->ip = addr1;
19115 + return 2;
19116 + }
19117 + } while (0);
19118 +
19119 + return 1; /* PaX in action */
19120 +}
19121 +#endif
19122 +
19123 +/*
19124 + * PaX: decide what to do with offenders (regs->ip = fault address)
19125 + *
19126 + * returns 1 when task should be killed
19127 + * 2 when gcc trampoline was detected
19128 + */
19129 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19130 +{
19131 + if (v8086_mode(regs))
19132 + return 1;
19133 +
19134 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19135 + return 1;
19136 +
19137 +#ifdef CONFIG_X86_32
19138 + return pax_handle_fetch_fault_32(regs);
19139 +#else
19140 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19141 + return pax_handle_fetch_fault_32(regs);
19142 + else
19143 + return pax_handle_fetch_fault_64(regs);
19144 +#endif
19145 +}
19146 +#endif
19147 +
19148 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19149 +void pax_report_insns(void *pc, void *sp)
19150 +{
19151 + long i;
19152 +
19153 + printk(KERN_ERR "PAX: bytes at PC: ");
19154 + for (i = 0; i < 20; i++) {
19155 + unsigned char c;
19156 + if (get_user(c, (__force unsigned char __user *)pc+i))
19157 + printk(KERN_CONT "?? ");
19158 + else
19159 + printk(KERN_CONT "%02x ", c);
19160 + }
19161 + printk("\n");
19162 +
19163 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19164 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19165 + unsigned long c;
19166 + if (get_user(c, (__force unsigned long __user *)sp+i))
19167 +#ifdef CONFIG_X86_32
19168 + printk(KERN_CONT "???????? ");
19169 +#else
19170 + printk(KERN_CONT "???????????????? ");
19171 +#endif
19172 + else
19173 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19174 + }
19175 + printk("\n");
19176 +}
19177 +#endif
19178 +
19179 +/**
19180 + * probe_kernel_write(): safely attempt to write to a location
19181 + * @dst: address to write to
19182 + * @src: pointer to the data that shall be written
19183 + * @size: size of the data chunk
19184 + *
19185 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19186 + * happens, handle that and return -EFAULT.
19187 + */
19188 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19189 +{
19190 + long ret;
19191 + mm_segment_t old_fs = get_fs();
19192 +
19193 + set_fs(KERNEL_DS);
19194 + pagefault_disable();
19195 + pax_open_kernel();
19196 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19197 + pax_close_kernel();
19198 + pagefault_enable();
19199 + set_fs(old_fs);
19200 +
19201 + return ret ? -EFAULT : 0;
19202 +}
19203 diff -urNp linux-2.6.39.4/arch/x86/mm/gup.c linux-2.6.39.4/arch/x86/mm/gup.c
19204 --- linux-2.6.39.4/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400
19205 +++ linux-2.6.39.4/arch/x86/mm/gup.c 2011-08-05 19:44:35.000000000 -0400
19206 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19207 addr = start;
19208 len = (unsigned long) nr_pages << PAGE_SHIFT;
19209 end = start + len;
19210 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19211 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19212 (void __user *)start, len)))
19213 return 0;
19214
19215 diff -urNp linux-2.6.39.4/arch/x86/mm/highmem_32.c linux-2.6.39.4/arch/x86/mm/highmem_32.c
19216 --- linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400
19217 +++ linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-08-05 19:44:35.000000000 -0400
19218 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19219 idx = type + KM_TYPE_NR*smp_processor_id();
19220 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19221 BUG_ON(!pte_none(*(kmap_pte-idx)));
19222 +
19223 + pax_open_kernel();
19224 set_pte(kmap_pte-idx, mk_pte(page, prot));
19225 + pax_close_kernel();
19226
19227 return (void *)vaddr;
19228 }
19229 diff -urNp linux-2.6.39.4/arch/x86/mm/hugetlbpage.c linux-2.6.39.4/arch/x86/mm/hugetlbpage.c
19230 --- linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
19231 +++ linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-08-05 19:44:35.000000000 -0400
19232 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19233 struct hstate *h = hstate_file(file);
19234 struct mm_struct *mm = current->mm;
19235 struct vm_area_struct *vma;
19236 - unsigned long start_addr;
19237 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19238 +
19239 +#ifdef CONFIG_PAX_SEGMEXEC
19240 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19241 + pax_task_size = SEGMEXEC_TASK_SIZE;
19242 +#endif
19243 +
19244 + pax_task_size -= PAGE_SIZE;
19245
19246 if (len > mm->cached_hole_size) {
19247 - start_addr = mm->free_area_cache;
19248 + start_addr = mm->free_area_cache;
19249 } else {
19250 - start_addr = TASK_UNMAPPED_BASE;
19251 - mm->cached_hole_size = 0;
19252 + start_addr = mm->mmap_base;
19253 + mm->cached_hole_size = 0;
19254 }
19255
19256 full_search:
19257 @@ -280,26 +287,27 @@ full_search:
19258
19259 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19260 /* At this point: (!vma || addr < vma->vm_end). */
19261 - if (TASK_SIZE - len < addr) {
19262 + if (pax_task_size - len < addr) {
19263 /*
19264 * Start a new search - just in case we missed
19265 * some holes.
19266 */
19267 - if (start_addr != TASK_UNMAPPED_BASE) {
19268 - start_addr = TASK_UNMAPPED_BASE;
19269 + if (start_addr != mm->mmap_base) {
19270 + start_addr = mm->mmap_base;
19271 mm->cached_hole_size = 0;
19272 goto full_search;
19273 }
19274 return -ENOMEM;
19275 }
19276 - if (!vma || addr + len <= vma->vm_start) {
19277 - mm->free_area_cache = addr + len;
19278 - return addr;
19279 - }
19280 + if (check_heap_stack_gap(vma, addr, len))
19281 + break;
19282 if (addr + mm->cached_hole_size < vma->vm_start)
19283 mm->cached_hole_size = vma->vm_start - addr;
19284 addr = ALIGN(vma->vm_end, huge_page_size(h));
19285 }
19286 +
19287 + mm->free_area_cache = addr + len;
19288 + return addr;
19289 }
19290
19291 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19292 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19293 {
19294 struct hstate *h = hstate_file(file);
19295 struct mm_struct *mm = current->mm;
19296 - struct vm_area_struct *vma, *prev_vma;
19297 - unsigned long base = mm->mmap_base, addr = addr0;
19298 + struct vm_area_struct *vma;
19299 + unsigned long base = mm->mmap_base, addr;
19300 unsigned long largest_hole = mm->cached_hole_size;
19301 - int first_time = 1;
19302
19303 /* don't allow allocations above current base */
19304 if (mm->free_area_cache > base)
19305 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19306 largest_hole = 0;
19307 mm->free_area_cache = base;
19308 }
19309 -try_again:
19310 +
19311 /* make sure it can fit in the remaining address space */
19312 if (mm->free_area_cache < len)
19313 goto fail;
19314
19315 /* either no address requested or can't fit in requested address hole */
19316 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19317 + addr = (mm->free_area_cache - len);
19318 do {
19319 + addr &= huge_page_mask(h);
19320 + vma = find_vma(mm, addr);
19321 /*
19322 * Lookup failure means no vma is above this address,
19323 * i.e. return with success:
19324 - */
19325 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19326 - return addr;
19327 -
19328 - /*
19329 * new region fits between prev_vma->vm_end and
19330 * vma->vm_start, use it:
19331 */
19332 - if (addr + len <= vma->vm_start &&
19333 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19334 + if (check_heap_stack_gap(vma, addr, len)) {
19335 /* remember the address as a hint for next time */
19336 - mm->cached_hole_size = largest_hole;
19337 - return (mm->free_area_cache = addr);
19338 - } else {
19339 - /* pull free_area_cache down to the first hole */
19340 - if (mm->free_area_cache == vma->vm_end) {
19341 - mm->free_area_cache = vma->vm_start;
19342 - mm->cached_hole_size = largest_hole;
19343 - }
19344 + mm->cached_hole_size = largest_hole;
19345 + return (mm->free_area_cache = addr);
19346 + }
19347 + /* pull free_area_cache down to the first hole */
19348 + if (mm->free_area_cache == vma->vm_end) {
19349 + mm->free_area_cache = vma->vm_start;
19350 + mm->cached_hole_size = largest_hole;
19351 }
19352
19353 /* remember the largest hole we saw so far */
19354 if (addr + largest_hole < vma->vm_start)
19355 - largest_hole = vma->vm_start - addr;
19356 + largest_hole = vma->vm_start - addr;
19357
19358 /* try just below the current vma->vm_start */
19359 - addr = (vma->vm_start - len) & huge_page_mask(h);
19360 - } while (len <= vma->vm_start);
19361 + addr = skip_heap_stack_gap(vma, len);
19362 + } while (!IS_ERR_VALUE(addr));
19363
19364 fail:
19365 /*
19366 - * if hint left us with no space for the requested
19367 - * mapping then try again:
19368 - */
19369 - if (first_time) {
19370 - mm->free_area_cache = base;
19371 - largest_hole = 0;
19372 - first_time = 0;
19373 - goto try_again;
19374 - }
19375 - /*
19376 * A failed mmap() very likely causes application failure,
19377 * so fall back to the bottom-up function here. This scenario
19378 * can happen with large stack limits and large mmap()
19379 * allocations.
19380 */
19381 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19382 +
19383 +#ifdef CONFIG_PAX_SEGMEXEC
19384 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19385 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19386 + else
19387 +#endif
19388 +
19389 + mm->mmap_base = TASK_UNMAPPED_BASE;
19390 +
19391 +#ifdef CONFIG_PAX_RANDMMAP
19392 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19393 + mm->mmap_base += mm->delta_mmap;
19394 +#endif
19395 +
19396 + mm->free_area_cache = mm->mmap_base;
19397 mm->cached_hole_size = ~0UL;
19398 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19399 len, pgoff, flags);
19400 @@ -386,6 +392,7 @@ fail:
19401 /*
19402 * Restore the topdown base:
19403 */
19404 + mm->mmap_base = base;
19405 mm->free_area_cache = base;
19406 mm->cached_hole_size = ~0UL;
19407
19408 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19409 struct hstate *h = hstate_file(file);
19410 struct mm_struct *mm = current->mm;
19411 struct vm_area_struct *vma;
19412 + unsigned long pax_task_size = TASK_SIZE;
19413
19414 if (len & ~huge_page_mask(h))
19415 return -EINVAL;
19416 - if (len > TASK_SIZE)
19417 +
19418 +#ifdef CONFIG_PAX_SEGMEXEC
19419 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19420 + pax_task_size = SEGMEXEC_TASK_SIZE;
19421 +#endif
19422 +
19423 + pax_task_size -= PAGE_SIZE;
19424 +
19425 + if (len > pax_task_size)
19426 return -ENOMEM;
19427
19428 if (flags & MAP_FIXED) {
19429 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19430 if (addr) {
19431 addr = ALIGN(addr, huge_page_size(h));
19432 vma = find_vma(mm, addr);
19433 - if (TASK_SIZE - len >= addr &&
19434 - (!vma || addr + len <= vma->vm_start))
19435 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19436 return addr;
19437 }
19438 if (mm->get_unmapped_area == arch_get_unmapped_area)
19439 diff -urNp linux-2.6.39.4/arch/x86/mm/init_32.c linux-2.6.39.4/arch/x86/mm/init_32.c
19440 --- linux-2.6.39.4/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
19441 +++ linux-2.6.39.4/arch/x86/mm/init_32.c 2011-08-05 19:44:35.000000000 -0400
19442 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19443 }
19444
19445 /*
19446 - * Creates a middle page table and puts a pointer to it in the
19447 - * given global directory entry. This only returns the gd entry
19448 - * in non-PAE compilation mode, since the middle layer is folded.
19449 - */
19450 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19451 -{
19452 - pud_t *pud;
19453 - pmd_t *pmd_table;
19454 -
19455 -#ifdef CONFIG_X86_PAE
19456 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19457 - if (after_bootmem)
19458 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19459 - else
19460 - pmd_table = (pmd_t *)alloc_low_page();
19461 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19462 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19463 - pud = pud_offset(pgd, 0);
19464 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19465 -
19466 - return pmd_table;
19467 - }
19468 -#endif
19469 - pud = pud_offset(pgd, 0);
19470 - pmd_table = pmd_offset(pud, 0);
19471 -
19472 - return pmd_table;
19473 -}
19474 -
19475 -/*
19476 * Create a page table and place a pointer to it in a middle page
19477 * directory entry:
19478 */
19479 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19480 page_table = (pte_t *)alloc_low_page();
19481
19482 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19483 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19484 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19485 +#else
19486 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19487 +#endif
19488 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19489 }
19490
19491 return pte_offset_kernel(pmd, 0);
19492 }
19493
19494 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19495 +{
19496 + pud_t *pud;
19497 + pmd_t *pmd_table;
19498 +
19499 + pud = pud_offset(pgd, 0);
19500 + pmd_table = pmd_offset(pud, 0);
19501 +
19502 + return pmd_table;
19503 +}
19504 +
19505 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19506 {
19507 int pgd_idx = pgd_index(vaddr);
19508 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19509 int pgd_idx, pmd_idx;
19510 unsigned long vaddr;
19511 pgd_t *pgd;
19512 + pud_t *pud;
19513 pmd_t *pmd;
19514 pte_t *pte = NULL;
19515
19516 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19517 pgd = pgd_base + pgd_idx;
19518
19519 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19520 - pmd = one_md_table_init(pgd);
19521 - pmd = pmd + pmd_index(vaddr);
19522 + pud = pud_offset(pgd, vaddr);
19523 + pmd = pmd_offset(pud, vaddr);
19524 +
19525 +#ifdef CONFIG_X86_PAE
19526 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19527 +#endif
19528 +
19529 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19530 pmd++, pmd_idx++) {
19531 pte = page_table_kmap_check(one_page_table_init(pmd),
19532 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19533 }
19534 }
19535
19536 -static inline int is_kernel_text(unsigned long addr)
19537 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19538 {
19539 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19540 - return 1;
19541 - return 0;
19542 + if ((start > ktla_ktva((unsigned long)_etext) ||
19543 + end <= ktla_ktva((unsigned long)_stext)) &&
19544 + (start > ktla_ktva((unsigned long)_einittext) ||
19545 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19546 +
19547 +#ifdef CONFIG_ACPI_SLEEP
19548 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19549 +#endif
19550 +
19551 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19552 + return 0;
19553 + return 1;
19554 }
19555
19556 /*
19557 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19558 unsigned long last_map_addr = end;
19559 unsigned long start_pfn, end_pfn;
19560 pgd_t *pgd_base = swapper_pg_dir;
19561 - int pgd_idx, pmd_idx, pte_ofs;
19562 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19563 unsigned long pfn;
19564 pgd_t *pgd;
19565 + pud_t *pud;
19566 pmd_t *pmd;
19567 pte_t *pte;
19568 unsigned pages_2m, pages_4k;
19569 @@ -281,8 +282,13 @@ repeat:
19570 pfn = start_pfn;
19571 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19572 pgd = pgd_base + pgd_idx;
19573 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19574 - pmd = one_md_table_init(pgd);
19575 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19576 + pud = pud_offset(pgd, 0);
19577 + pmd = pmd_offset(pud, 0);
19578 +
19579 +#ifdef CONFIG_X86_PAE
19580 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19581 +#endif
19582
19583 if (pfn >= end_pfn)
19584 continue;
19585 @@ -294,14 +300,13 @@ repeat:
19586 #endif
19587 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19588 pmd++, pmd_idx++) {
19589 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19590 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19591
19592 /*
19593 * Map with big pages if possible, otherwise
19594 * create normal page tables:
19595 */
19596 if (use_pse) {
19597 - unsigned int addr2;
19598 pgprot_t prot = PAGE_KERNEL_LARGE;
19599 /*
19600 * first pass will use the same initial
19601 @@ -311,11 +316,7 @@ repeat:
19602 __pgprot(PTE_IDENT_ATTR |
19603 _PAGE_PSE);
19604
19605 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19606 - PAGE_OFFSET + PAGE_SIZE-1;
19607 -
19608 - if (is_kernel_text(addr) ||
19609 - is_kernel_text(addr2))
19610 + if (is_kernel_text(address, address + PMD_SIZE))
19611 prot = PAGE_KERNEL_LARGE_EXEC;
19612
19613 pages_2m++;
19614 @@ -332,7 +333,7 @@ repeat:
19615 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19616 pte += pte_ofs;
19617 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19618 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19619 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19620 pgprot_t prot = PAGE_KERNEL;
19621 /*
19622 * first pass will use the same initial
19623 @@ -340,7 +341,7 @@ repeat:
19624 */
19625 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19626
19627 - if (is_kernel_text(addr))
19628 + if (is_kernel_text(address, address + PAGE_SIZE))
19629 prot = PAGE_KERNEL_EXEC;
19630
19631 pages_4k++;
19632 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19633
19634 pud = pud_offset(pgd, va);
19635 pmd = pmd_offset(pud, va);
19636 - if (!pmd_present(*pmd))
19637 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19638 break;
19639
19640 pte = pte_offset_kernel(pmd, va);
19641 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19642
19643 static void __init pagetable_init(void)
19644 {
19645 - pgd_t *pgd_base = swapper_pg_dir;
19646 -
19647 - permanent_kmaps_init(pgd_base);
19648 + permanent_kmaps_init(swapper_pg_dir);
19649 }
19650
19651 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19652 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19653 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19654
19655 /* user-defined highmem size */
19656 @@ -754,6 +753,12 @@ void __init mem_init(void)
19657
19658 pci_iommu_alloc();
19659
19660 +#ifdef CONFIG_PAX_PER_CPU_PGD
19661 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19662 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19663 + KERNEL_PGD_PTRS);
19664 +#endif
19665 +
19666 #ifdef CONFIG_FLATMEM
19667 BUG_ON(!mem_map);
19668 #endif
19669 @@ -771,7 +776,7 @@ void __init mem_init(void)
19670 set_highmem_pages_init();
19671
19672 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19673 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19674 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19675 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19676
19677 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19678 @@ -812,10 +817,10 @@ void __init mem_init(void)
19679 ((unsigned long)&__init_end -
19680 (unsigned long)&__init_begin) >> 10,
19681
19682 - (unsigned long)&_etext, (unsigned long)&_edata,
19683 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19684 + (unsigned long)&_sdata, (unsigned long)&_edata,
19685 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19686
19687 - (unsigned long)&_text, (unsigned long)&_etext,
19688 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19689 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19690
19691 /*
19692 @@ -893,6 +898,7 @@ void set_kernel_text_rw(void)
19693 if (!kernel_set_to_readonly)
19694 return;
19695
19696 + start = ktla_ktva(start);
19697 pr_debug("Set kernel text: %lx - %lx for read write\n",
19698 start, start+size);
19699
19700 @@ -907,6 +913,7 @@ void set_kernel_text_ro(void)
19701 if (!kernel_set_to_readonly)
19702 return;
19703
19704 + start = ktla_ktva(start);
19705 pr_debug("Set kernel text: %lx - %lx for read only\n",
19706 start, start+size);
19707
19708 @@ -935,6 +942,7 @@ void mark_rodata_ro(void)
19709 unsigned long start = PFN_ALIGN(_text);
19710 unsigned long size = PFN_ALIGN(_etext) - start;
19711
19712 + start = ktla_ktva(start);
19713 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19714 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19715 size >> 10);
19716 diff -urNp linux-2.6.39.4/arch/x86/mm/init_64.c linux-2.6.39.4/arch/x86/mm/init_64.c
19717 --- linux-2.6.39.4/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400
19718 +++ linux-2.6.39.4/arch/x86/mm/init_64.c 2011-08-05 19:44:35.000000000 -0400
19719 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
19720 * around without checking the pgd every time.
19721 */
19722
19723 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19724 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19725 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19726
19727 int force_personality32;
19728 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
19729
19730 for (address = start; address <= end; address += PGDIR_SIZE) {
19731 const pgd_t *pgd_ref = pgd_offset_k(address);
19732 +
19733 +#ifdef CONFIG_PAX_PER_CPU_PGD
19734 + unsigned long cpu;
19735 +#else
19736 struct page *page;
19737 +#endif
19738
19739 if (pgd_none(*pgd_ref))
19740 continue;
19741
19742 spin_lock(&pgd_lock);
19743 +
19744 +#ifdef CONFIG_PAX_PER_CPU_PGD
19745 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19746 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19747 +#else
19748 list_for_each_entry(page, &pgd_list, lru) {
19749 pgd_t *pgd;
19750 spinlock_t *pgt_lock;
19751 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
19752 /* the pgt_lock only for Xen */
19753 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19754 spin_lock(pgt_lock);
19755 +#endif
19756
19757 if (pgd_none(*pgd))
19758 set_pgd(pgd, *pgd_ref);
19759 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
19760 BUG_ON(pgd_page_vaddr(*pgd)
19761 != pgd_page_vaddr(*pgd_ref));
19762
19763 +#ifndef CONFIG_PAX_PER_CPU_PGD
19764 spin_unlock(pgt_lock);
19765 +#endif
19766 +
19767 }
19768 spin_unlock(&pgd_lock);
19769 }
19770 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19771 pmd = fill_pmd(pud, vaddr);
19772 pte = fill_pte(pmd, vaddr);
19773
19774 + pax_open_kernel();
19775 set_pte(pte, new_pte);
19776 + pax_close_kernel();
19777
19778 /*
19779 * It's enough to flush this one mapping.
19780 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
19781 pgd = pgd_offset_k((unsigned long)__va(phys));
19782 if (pgd_none(*pgd)) {
19783 pud = (pud_t *) spp_getpage();
19784 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19785 - _PAGE_USER));
19786 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19787 }
19788 pud = pud_offset(pgd, (unsigned long)__va(phys));
19789 if (pud_none(*pud)) {
19790 pmd = (pmd_t *) spp_getpage();
19791 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19792 - _PAGE_USER));
19793 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19794 }
19795 pmd = pmd_offset(pud, phys);
19796 BUG_ON(!pmd_none(*pmd));
19797 @@ -698,6 +712,12 @@ void __init mem_init(void)
19798
19799 pci_iommu_alloc();
19800
19801 +#ifdef CONFIG_PAX_PER_CPU_PGD
19802 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19803 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19804 + KERNEL_PGD_PTRS);
19805 +#endif
19806 +
19807 /* clear_bss() already clear the empty_zero_page */
19808
19809 reservedpages = 0;
19810 @@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr)
19811 static struct vm_area_struct gate_vma = {
19812 .vm_start = VSYSCALL_START,
19813 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19814 - .vm_page_prot = PAGE_READONLY_EXEC,
19815 - .vm_flags = VM_READ | VM_EXEC
19816 + .vm_page_prot = PAGE_READONLY,
19817 + .vm_flags = VM_READ
19818 };
19819
19820 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19821 @@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add
19822
19823 const char *arch_vma_name(struct vm_area_struct *vma)
19824 {
19825 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19826 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19827 return "[vdso]";
19828 if (vma == &gate_vma)
19829 return "[vsyscall]";
19830 diff -urNp linux-2.6.39.4/arch/x86/mm/init.c linux-2.6.39.4/arch/x86/mm/init.c
19831 --- linux-2.6.39.4/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400
19832 +++ linux-2.6.39.4/arch/x86/mm/init.c 2011-08-05 19:44:35.000000000 -0400
19833 @@ -33,7 +33,7 @@ int direct_gbpages
19834 static void __init find_early_table_space(unsigned long end, int use_pse,
19835 int use_gbpages)
19836 {
19837 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19838 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19839 phys_addr_t base;
19840
19841 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19842 @@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m
19843 */
19844 int devmem_is_allowed(unsigned long pagenr)
19845 {
19846 - if (pagenr <= 256)
19847 +#ifdef CONFIG_GRKERNSEC_KMEM
19848 + /* allow BDA */
19849 + if (!pagenr)
19850 + return 1;
19851 + /* allow EBDA */
19852 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19853 + return 1;
19854 +#else
19855 + if (!pagenr)
19856 + return 1;
19857 +#ifdef CONFIG_VM86
19858 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19859 + return 1;
19860 +#endif
19861 +#endif
19862 +
19863 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19864 return 1;
19865 +#ifdef CONFIG_GRKERNSEC_KMEM
19866 + /* throw out everything else below 1MB */
19867 + if (pagenr <= 256)
19868 + return 0;
19869 +#endif
19870 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19871 return 0;
19872 if (!page_is_ram(pagenr))
19873 return 1;
19874 +
19875 return 0;
19876 }
19877
19878 @@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne
19879
19880 void free_initmem(void)
19881 {
19882 +
19883 +#ifdef CONFIG_PAX_KERNEXEC
19884 +#ifdef CONFIG_X86_32
19885 + /* PaX: limit KERNEL_CS to actual size */
19886 + unsigned long addr, limit;
19887 + struct desc_struct d;
19888 + int cpu;
19889 +
19890 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19891 + limit = (limit - 1UL) >> PAGE_SHIFT;
19892 +
19893 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19894 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19895 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19896 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19897 + }
19898 +
19899 + /* PaX: make KERNEL_CS read-only */
19900 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19901 + if (!paravirt_enabled())
19902 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19903 +/*
19904 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19905 + pgd = pgd_offset_k(addr);
19906 + pud = pud_offset(pgd, addr);
19907 + pmd = pmd_offset(pud, addr);
19908 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19909 + }
19910 +*/
19911 +#ifdef CONFIG_X86_PAE
19912 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19913 +/*
19914 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19915 + pgd = pgd_offset_k(addr);
19916 + pud = pud_offset(pgd, addr);
19917 + pmd = pmd_offset(pud, addr);
19918 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19919 + }
19920 +*/
19921 +#endif
19922 +
19923 +#ifdef CONFIG_MODULES
19924 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19925 +#endif
19926 +
19927 +#else
19928 + pgd_t *pgd;
19929 + pud_t *pud;
19930 + pmd_t *pmd;
19931 + unsigned long addr, end;
19932 +
19933 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19934 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19935 + pgd = pgd_offset_k(addr);
19936 + pud = pud_offset(pgd, addr);
19937 + pmd = pmd_offset(pud, addr);
19938 + if (!pmd_present(*pmd))
19939 + continue;
19940 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19941 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19942 + else
19943 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19944 + }
19945 +
19946 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19947 + end = addr + KERNEL_IMAGE_SIZE;
19948 + for (; addr < end; addr += PMD_SIZE) {
19949 + pgd = pgd_offset_k(addr);
19950 + pud = pud_offset(pgd, addr);
19951 + pmd = pmd_offset(pud, addr);
19952 + if (!pmd_present(*pmd))
19953 + continue;
19954 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19955 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19956 + }
19957 +#endif
19958 +
19959 + flush_tlb_all();
19960 +#endif
19961 +
19962 free_init_pages("unused kernel memory",
19963 (unsigned long)(&__init_begin),
19964 (unsigned long)(&__init_end));
19965 diff -urNp linux-2.6.39.4/arch/x86/mm/iomap_32.c linux-2.6.39.4/arch/x86/mm/iomap_32.c
19966 --- linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400
19967 +++ linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-08-05 19:44:35.000000000 -0400
19968 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
19969 type = kmap_atomic_idx_push();
19970 idx = type + KM_TYPE_NR * smp_processor_id();
19971 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19972 +
19973 + pax_open_kernel();
19974 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
19975 + pax_close_kernel();
19976 +
19977 arch_flush_lazy_mmu_mode();
19978
19979 return (void *)vaddr;
19980 diff -urNp linux-2.6.39.4/arch/x86/mm/ioremap.c linux-2.6.39.4/arch/x86/mm/ioremap.c
19981 --- linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400
19982 +++ linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-08-05 19:44:35.000000000 -0400
19983 @@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
19984 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
19985 int is_ram = page_is_ram(pfn);
19986
19987 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
19988 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
19989 return NULL;
19990 WARN_ON_ONCE(is_ram);
19991 }
19992 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
19993 early_param("early_ioremap_debug", early_ioremap_debug_setup);
19994
19995 static __initdata int after_paging_init;
19996 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
19997 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
19998
19999 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20000 {
20001 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20002 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20003
20004 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20005 - memset(bm_pte, 0, sizeof(bm_pte));
20006 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
20007 + pmd_populate_user(&init_mm, pmd, bm_pte);
20008
20009 /*
20010 * The boot-ioremap range spans multiple pmds, for which
20011 diff -urNp linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c
20012 --- linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400
20013 +++ linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-05 19:44:35.000000000 -0400
20014 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20015 * memory (e.g. tracked pages)? For now, we need this to avoid
20016 * invoking kmemcheck for PnP BIOS calls.
20017 */
20018 - if (regs->flags & X86_VM_MASK)
20019 + if (v8086_mode(regs))
20020 return false;
20021 - if (regs->cs != __KERNEL_CS)
20022 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20023 return false;
20024
20025 pte = kmemcheck_pte_lookup(address);
20026 diff -urNp linux-2.6.39.4/arch/x86/mm/mmap.c linux-2.6.39.4/arch/x86/mm/mmap.c
20027 --- linux-2.6.39.4/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
20028 +++ linux-2.6.39.4/arch/x86/mm/mmap.c 2011-08-05 19:44:35.000000000 -0400
20029 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20030 * Leave an at least ~128 MB hole with possible stack randomization.
20031 */
20032 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20033 -#define MAX_GAP (TASK_SIZE/6*5)
20034 +#define MAX_GAP (pax_task_size/6*5)
20035
20036 /*
20037 * True on X86_32 or when emulating IA32 on X86_64
20038 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20039 return rnd << PAGE_SHIFT;
20040 }
20041
20042 -static unsigned long mmap_base(void)
20043 +static unsigned long mmap_base(struct mm_struct *mm)
20044 {
20045 unsigned long gap = rlimit(RLIMIT_STACK);
20046 + unsigned long pax_task_size = TASK_SIZE;
20047 +
20048 +#ifdef CONFIG_PAX_SEGMEXEC
20049 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20050 + pax_task_size = SEGMEXEC_TASK_SIZE;
20051 +#endif
20052
20053 if (gap < MIN_GAP)
20054 gap = MIN_GAP;
20055 else if (gap > MAX_GAP)
20056 gap = MAX_GAP;
20057
20058 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20059 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20060 }
20061
20062 /*
20063 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20064 * does, but not when emulating X86_32
20065 */
20066 -static unsigned long mmap_legacy_base(void)
20067 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
20068 {
20069 - if (mmap_is_ia32())
20070 + if (mmap_is_ia32()) {
20071 +
20072 +#ifdef CONFIG_PAX_SEGMEXEC
20073 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20074 + return SEGMEXEC_TASK_UNMAPPED_BASE;
20075 + else
20076 +#endif
20077 +
20078 return TASK_UNMAPPED_BASE;
20079 - else
20080 + } else
20081 return TASK_UNMAPPED_BASE + mmap_rnd();
20082 }
20083
20084 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20085 void arch_pick_mmap_layout(struct mm_struct *mm)
20086 {
20087 if (mmap_is_legacy()) {
20088 - mm->mmap_base = mmap_legacy_base();
20089 + mm->mmap_base = mmap_legacy_base(mm);
20090 +
20091 +#ifdef CONFIG_PAX_RANDMMAP
20092 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20093 + mm->mmap_base += mm->delta_mmap;
20094 +#endif
20095 +
20096 mm->get_unmapped_area = arch_get_unmapped_area;
20097 mm->unmap_area = arch_unmap_area;
20098 } else {
20099 - mm->mmap_base = mmap_base();
20100 + mm->mmap_base = mmap_base(mm);
20101 +
20102 +#ifdef CONFIG_PAX_RANDMMAP
20103 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20104 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20105 +#endif
20106 +
20107 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20108 mm->unmap_area = arch_unmap_area_topdown;
20109 }
20110 diff -urNp linux-2.6.39.4/arch/x86/mm/mmio-mod.c linux-2.6.39.4/arch/x86/mm/mmio-mod.c
20111 --- linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400
20112 +++ linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-08-05 19:44:35.000000000 -0400
20113 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20114 break;
20115 default:
20116 {
20117 - unsigned char *ip = (unsigned char *)instptr;
20118 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20119 my_trace->opcode = MMIO_UNKNOWN_OP;
20120 my_trace->width = 0;
20121 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20122 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20123 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20124 void __iomem *addr)
20125 {
20126 - static atomic_t next_id;
20127 + static atomic_unchecked_t next_id;
20128 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20129 /* These are page-unaligned. */
20130 struct mmiotrace_map map = {
20131 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20132 .private = trace
20133 },
20134 .phys = offset,
20135 - .id = atomic_inc_return(&next_id)
20136 + .id = atomic_inc_return_unchecked(&next_id)
20137 };
20138 map.map_id = trace->id;
20139
20140 diff -urNp linux-2.6.39.4/arch/x86/mm/numa_32.c linux-2.6.39.4/arch/x86/mm/numa_32.c
20141 --- linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400
20142 +++ linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-08-05 19:44:35.000000000 -0400
20143 @@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
20144 }
20145 #endif
20146
20147 -extern unsigned long find_max_low_pfn(void);
20148 extern unsigned long highend_pfn, highstart_pfn;
20149
20150 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
20151 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr.c linux-2.6.39.4/arch/x86/mm/pageattr.c
20152 --- linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400
20153 +++ linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-08-05 19:44:35.000000000 -0400
20154 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20155 */
20156 #ifdef CONFIG_PCI_BIOS
20157 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20158 - pgprot_val(forbidden) |= _PAGE_NX;
20159 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20160 #endif
20161
20162 /*
20163 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20164 * Does not cover __inittext since that is gone later on. On
20165 * 64bit we do not enforce !NX on the low mapping
20166 */
20167 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20168 - pgprot_val(forbidden) |= _PAGE_NX;
20169 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20170 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20171
20172 +#ifdef CONFIG_DEBUG_RODATA
20173 /*
20174 * The .rodata section needs to be read-only. Using the pfn
20175 * catches all aliases.
20176 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20177 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20178 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20179 pgprot_val(forbidden) |= _PAGE_RW;
20180 +#endif
20181
20182 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20183 /*
20184 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20185 }
20186 #endif
20187
20188 +#ifdef CONFIG_PAX_KERNEXEC
20189 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20190 + pgprot_val(forbidden) |= _PAGE_RW;
20191 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20192 + }
20193 +#endif
20194 +
20195 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20196
20197 return prot;
20198 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20199 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20200 {
20201 /* change init_mm */
20202 + pax_open_kernel();
20203 set_pte_atomic(kpte, pte);
20204 +
20205 #ifdef CONFIG_X86_32
20206 if (!SHARED_KERNEL_PMD) {
20207 +
20208 +#ifdef CONFIG_PAX_PER_CPU_PGD
20209 + unsigned long cpu;
20210 +#else
20211 struct page *page;
20212 +#endif
20213
20214 +#ifdef CONFIG_PAX_PER_CPU_PGD
20215 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20216 + pgd_t *pgd = get_cpu_pgd(cpu);
20217 +#else
20218 list_for_each_entry(page, &pgd_list, lru) {
20219 - pgd_t *pgd;
20220 + pgd_t *pgd = (pgd_t *)page_address(page);
20221 +#endif
20222 +
20223 pud_t *pud;
20224 pmd_t *pmd;
20225
20226 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20227 + pgd += pgd_index(address);
20228 pud = pud_offset(pgd, address);
20229 pmd = pmd_offset(pud, address);
20230 set_pte_atomic((pte_t *)pmd, pte);
20231 }
20232 }
20233 #endif
20234 + pax_close_kernel();
20235 }
20236
20237 static int
20238 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr-test.c linux-2.6.39.4/arch/x86/mm/pageattr-test.c
20239 --- linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400
20240 +++ linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-08-05 19:44:35.000000000 -0400
20241 @@ -36,7 +36,7 @@ enum {
20242
20243 static int pte_testbit(pte_t pte)
20244 {
20245 - return pte_flags(pte) & _PAGE_UNUSED1;
20246 + return pte_flags(pte) & _PAGE_CPA_TEST;
20247 }
20248
20249 struct split_state {
20250 diff -urNp linux-2.6.39.4/arch/x86/mm/pat.c linux-2.6.39.4/arch/x86/mm/pat.c
20251 --- linux-2.6.39.4/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400
20252 +++ linux-2.6.39.4/arch/x86/mm/pat.c 2011-08-05 19:44:35.000000000 -0400
20253 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20254
20255 if (!entry) {
20256 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20257 - current->comm, current->pid, start, end);
20258 + current->comm, task_pid_nr(current), start, end);
20259 return -EINVAL;
20260 }
20261
20262 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20263 while (cursor < to) {
20264 if (!devmem_is_allowed(pfn)) {
20265 printk(KERN_INFO
20266 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20267 - current->comm, from, to);
20268 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20269 + current->comm, from, to, cursor);
20270 return 0;
20271 }
20272 cursor += PAGE_SIZE;
20273 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20274 printk(KERN_INFO
20275 "%s:%d ioremap_change_attr failed %s "
20276 "for %Lx-%Lx\n",
20277 - current->comm, current->pid,
20278 + current->comm, task_pid_nr(current),
20279 cattr_name(flags),
20280 base, (unsigned long long)(base + size));
20281 return -EINVAL;
20282 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20283 if (want_flags != flags) {
20284 printk(KERN_WARNING
20285 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20286 - current->comm, current->pid,
20287 + current->comm, task_pid_nr(current),
20288 cattr_name(want_flags),
20289 (unsigned long long)paddr,
20290 (unsigned long long)(paddr + size),
20291 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20292 free_memtype(paddr, paddr + size);
20293 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20294 " for %Lx-%Lx, got %s\n",
20295 - current->comm, current->pid,
20296 + current->comm, task_pid_nr(current),
20297 cattr_name(want_flags),
20298 (unsigned long long)paddr,
20299 (unsigned long long)(paddr + size),
20300 diff -urNp linux-2.6.39.4/arch/x86/mm/pf_in.c linux-2.6.39.4/arch/x86/mm/pf_in.c
20301 --- linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-05-19 00:06:34.000000000 -0400
20302 +++ linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-08-05 19:44:35.000000000 -0400
20303 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20304 int i;
20305 enum reason_type rv = OTHERS;
20306
20307 - p = (unsigned char *)ins_addr;
20308 + p = (unsigned char *)ktla_ktva(ins_addr);
20309 p += skip_prefix(p, &prf);
20310 p += get_opcode(p, &opcode);
20311
20312 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20313 struct prefix_bits prf;
20314 int i;
20315
20316 - p = (unsigned char *)ins_addr;
20317 + p = (unsigned char *)ktla_ktva(ins_addr);
20318 p += skip_prefix(p, &prf);
20319 p += get_opcode(p, &opcode);
20320
20321 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20322 struct prefix_bits prf;
20323 int i;
20324
20325 - p = (unsigned char *)ins_addr;
20326 + p = (unsigned char *)ktla_ktva(ins_addr);
20327 p += skip_prefix(p, &prf);
20328 p += get_opcode(p, &opcode);
20329
20330 @@ -416,7 +416,7 @@ unsigned long get_ins_reg_val(unsigned l
20331 int i;
20332 unsigned long rv;
20333
20334 - p = (unsigned char *)ins_addr;
20335 + p = (unsigned char *)ktla_ktva(ins_addr);
20336 p += skip_prefix(p, &prf);
20337 p += get_opcode(p, &opcode);
20338 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20339 @@ -476,7 +476,7 @@ unsigned long get_ins_imm_val(unsigned l
20340 int i;
20341 unsigned long rv;
20342
20343 - p = (unsigned char *)ins_addr;
20344 + p = (unsigned char *)ktla_ktva(ins_addr);
20345 p += skip_prefix(p, &prf);
20346 p += get_opcode(p, &opcode);
20347 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20348 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable_32.c linux-2.6.39.4/arch/x86/mm/pgtable_32.c
20349 --- linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400
20350 +++ linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-08-05 19:44:35.000000000 -0400
20351 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20352 return;
20353 }
20354 pte = pte_offset_kernel(pmd, vaddr);
20355 +
20356 + pax_open_kernel();
20357 if (pte_val(pteval))
20358 set_pte_at(&init_mm, vaddr, pte, pteval);
20359 else
20360 pte_clear(&init_mm, vaddr, pte);
20361 + pax_close_kernel();
20362
20363 /*
20364 * It's enough to flush this one mapping.
20365 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable.c linux-2.6.39.4/arch/x86/mm/pgtable.c
20366 --- linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400
20367 +++ linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-08-05 19:44:35.000000000 -0400
20368 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20369 list_del(&page->lru);
20370 }
20371
20372 -#define UNSHARED_PTRS_PER_PGD \
20373 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20374 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20375 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20376
20377 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20378 +{
20379 + while (count--)
20380 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20381 +}
20382 +#endif
20383 +
20384 +#ifdef CONFIG_PAX_PER_CPU_PGD
20385 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20386 +{
20387 + while (count--)
20388 +
20389 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20390 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20391 +#else
20392 + *dst++ = *src++;
20393 +#endif
20394
20395 +}
20396 +#endif
20397 +
20398 +#ifdef CONFIG_X86_64
20399 +#define pxd_t pud_t
20400 +#define pyd_t pgd_t
20401 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20402 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20403 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20404 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20405 +#define PYD_SIZE PGDIR_SIZE
20406 +#else
20407 +#define pxd_t pmd_t
20408 +#define pyd_t pud_t
20409 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20410 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20411 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20412 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20413 +#define PYD_SIZE PUD_SIZE
20414 +#endif
20415 +
20416 +#ifdef CONFIG_PAX_PER_CPU_PGD
20417 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20418 +static inline void pgd_dtor(pgd_t *pgd) {}
20419 +#else
20420 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20421 {
20422 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20423 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20424 pgd_list_del(pgd);
20425 spin_unlock(&pgd_lock);
20426 }
20427 +#endif
20428
20429 /*
20430 * List of all pgd's needed for non-PAE so it can invalidate entries
20431 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20432 * -- wli
20433 */
20434
20435 -#ifdef CONFIG_X86_PAE
20436 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20437 /*
20438 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20439 * updating the top-level pagetable entries to guarantee the
20440 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20441 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20442 * and initialize the kernel pmds here.
20443 */
20444 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20445 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20446
20447 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20448 {
20449 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20450 */
20451 flush_tlb_mm(mm);
20452 }
20453 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20454 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20455 #else /* !CONFIG_X86_PAE */
20456
20457 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20458 -#define PREALLOCATED_PMDS 0
20459 +#define PREALLOCATED_PXDS 0
20460
20461 #endif /* CONFIG_X86_PAE */
20462
20463 -static void free_pmds(pmd_t *pmds[])
20464 +static void free_pxds(pxd_t *pxds[])
20465 {
20466 int i;
20467
20468 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20469 - if (pmds[i])
20470 - free_page((unsigned long)pmds[i]);
20471 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20472 + if (pxds[i])
20473 + free_page((unsigned long)pxds[i]);
20474 }
20475
20476 -static int preallocate_pmds(pmd_t *pmds[])
20477 +static int preallocate_pxds(pxd_t *pxds[])
20478 {
20479 int i;
20480 bool failed = false;
20481
20482 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20483 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20484 - if (pmd == NULL)
20485 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20486 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20487 + if (pxd == NULL)
20488 failed = true;
20489 - pmds[i] = pmd;
20490 + pxds[i] = pxd;
20491 }
20492
20493 if (failed) {
20494 - free_pmds(pmds);
20495 + free_pxds(pxds);
20496 return -ENOMEM;
20497 }
20498
20499 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20500 * preallocate which never got a corresponding vma will need to be
20501 * freed manually.
20502 */
20503 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20504 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20505 {
20506 int i;
20507
20508 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20509 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20510 pgd_t pgd = pgdp[i];
20511
20512 if (pgd_val(pgd) != 0) {
20513 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20514 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20515
20516 - pgdp[i] = native_make_pgd(0);
20517 + set_pgd(pgdp + i, native_make_pgd(0));
20518
20519 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20520 - pmd_free(mm, pmd);
20521 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20522 + pxd_free(mm, pxd);
20523 }
20524 }
20525 }
20526
20527 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20528 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20529 {
20530 - pud_t *pud;
20531 + pyd_t *pyd;
20532 unsigned long addr;
20533 int i;
20534
20535 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20536 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20537 return;
20538
20539 - pud = pud_offset(pgd, 0);
20540 +#ifdef CONFIG_X86_64
20541 + pyd = pyd_offset(mm, 0L);
20542 +#else
20543 + pyd = pyd_offset(pgd, 0L);
20544 +#endif
20545
20546 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20547 - i++, pud++, addr += PUD_SIZE) {
20548 - pmd_t *pmd = pmds[i];
20549 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20550 + i++, pyd++, addr += PYD_SIZE) {
20551 + pxd_t *pxd = pxds[i];
20552
20553 if (i >= KERNEL_PGD_BOUNDARY)
20554 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20555 - sizeof(pmd_t) * PTRS_PER_PMD);
20556 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20557 + sizeof(pxd_t) * PTRS_PER_PMD);
20558
20559 - pud_populate(mm, pud, pmd);
20560 + pyd_populate(mm, pyd, pxd);
20561 }
20562 }
20563
20564 pgd_t *pgd_alloc(struct mm_struct *mm)
20565 {
20566 pgd_t *pgd;
20567 - pmd_t *pmds[PREALLOCATED_PMDS];
20568 + pxd_t *pxds[PREALLOCATED_PXDS];
20569
20570 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20571
20572 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20573
20574 mm->pgd = pgd;
20575
20576 - if (preallocate_pmds(pmds) != 0)
20577 + if (preallocate_pxds(pxds) != 0)
20578 goto out_free_pgd;
20579
20580 if (paravirt_pgd_alloc(mm) != 0)
20581 - goto out_free_pmds;
20582 + goto out_free_pxds;
20583
20584 /*
20585 * Make sure that pre-populating the pmds is atomic with
20586 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20587 spin_lock(&pgd_lock);
20588
20589 pgd_ctor(mm, pgd);
20590 - pgd_prepopulate_pmd(mm, pgd, pmds);
20591 + pgd_prepopulate_pxd(mm, pgd, pxds);
20592
20593 spin_unlock(&pgd_lock);
20594
20595 return pgd;
20596
20597 -out_free_pmds:
20598 - free_pmds(pmds);
20599 +out_free_pxds:
20600 + free_pxds(pxds);
20601 out_free_pgd:
20602 free_page((unsigned long)pgd);
20603 out:
20604 @@ -295,7 +344,7 @@ out:
20605
20606 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20607 {
20608 - pgd_mop_up_pmds(mm, pgd);
20609 + pgd_mop_up_pxds(mm, pgd);
20610 pgd_dtor(pgd);
20611 paravirt_pgd_free(mm, pgd);
20612 free_page((unsigned long)pgd);
20613 diff -urNp linux-2.6.39.4/arch/x86/mm/setup_nx.c linux-2.6.39.4/arch/x86/mm/setup_nx.c
20614 --- linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400
20615 +++ linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-08-05 19:44:35.000000000 -0400
20616 @@ -5,8 +5,10 @@
20617 #include <asm/pgtable.h>
20618 #include <asm/proto.h>
20619
20620 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20621 static int disable_nx __cpuinitdata;
20622
20623 +#ifndef CONFIG_PAX_PAGEEXEC
20624 /*
20625 * noexec = on|off
20626 *
20627 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20628 return 0;
20629 }
20630 early_param("noexec", noexec_setup);
20631 +#endif
20632 +
20633 +#endif
20634
20635 void __cpuinit x86_configure_nx(void)
20636 {
20637 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20638 if (cpu_has_nx && !disable_nx)
20639 __supported_pte_mask |= _PAGE_NX;
20640 else
20641 +#endif
20642 __supported_pte_mask &= ~_PAGE_NX;
20643 }
20644
20645 diff -urNp linux-2.6.39.4/arch/x86/mm/tlb.c linux-2.6.39.4/arch/x86/mm/tlb.c
20646 --- linux-2.6.39.4/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400
20647 +++ linux-2.6.39.4/arch/x86/mm/tlb.c 2011-08-05 19:44:35.000000000 -0400
20648 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20649 BUG();
20650 cpumask_clear_cpu(cpu,
20651 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20652 +
20653 +#ifndef CONFIG_PAX_PER_CPU_PGD
20654 load_cr3(swapper_pg_dir);
20655 +#endif
20656 +
20657 }
20658 EXPORT_SYMBOL_GPL(leave_mm);
20659
20660 diff -urNp linux-2.6.39.4/arch/x86/oprofile/backtrace.c linux-2.6.39.4/arch/x86/oprofile/backtrace.c
20661 --- linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400
20662 +++ linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-08-05 19:44:35.000000000 -0400
20663 @@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram
20664 struct stack_frame_ia32 *fp;
20665
20666 /* Also check accessibility of one struct frame_head beyond */
20667 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
20668 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
20669 return NULL;
20670 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
20671 return NULL;
20672 @@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg
20673 {
20674 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20675
20676 - if (!user_mode_vm(regs)) {
20677 + if (!user_mode(regs)) {
20678 unsigned long stack = kernel_stack_pointer(regs);
20679 if (depth)
20680 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20681 diff -urNp linux-2.6.39.4/arch/x86/pci/mrst.c linux-2.6.39.4/arch/x86/pci/mrst.c
20682 --- linux-2.6.39.4/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400
20683 +++ linux-2.6.39.4/arch/x86/pci/mrst.c 2011-08-05 20:34:06.000000000 -0400
20684 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20685 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20686 pci_mmcfg_late_init();
20687 pcibios_enable_irq = mrst_pci_irq_enable;
20688 - pci_root_ops = pci_mrst_ops;
20689 + pax_open_kernel();
20690 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20691 + pax_close_kernel();
20692 /* Continue with standard init */
20693 return 1;
20694 }
20695 diff -urNp linux-2.6.39.4/arch/x86/pci/pcbios.c linux-2.6.39.4/arch/x86/pci/pcbios.c
20696 --- linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400
20697 +++ linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-08-05 20:34:06.000000000 -0400
20698 @@ -79,50 +79,93 @@ union bios32 {
20699 static struct {
20700 unsigned long address;
20701 unsigned short segment;
20702 -} bios32_indirect = { 0, __KERNEL_CS };
20703 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20704
20705 /*
20706 * Returns the entry point for the given service, NULL on error
20707 */
20708
20709 -static unsigned long bios32_service(unsigned long service)
20710 +static unsigned long __devinit bios32_service(unsigned long service)
20711 {
20712 unsigned char return_code; /* %al */
20713 unsigned long address; /* %ebx */
20714 unsigned long length; /* %ecx */
20715 unsigned long entry; /* %edx */
20716 unsigned long flags;
20717 + struct desc_struct d, *gdt;
20718
20719 local_irq_save(flags);
20720 - __asm__("lcall *(%%edi); cld"
20721 +
20722 + gdt = get_cpu_gdt_table(smp_processor_id());
20723 +
20724 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20725 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20726 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20727 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20728 +
20729 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20730 : "=a" (return_code),
20731 "=b" (address),
20732 "=c" (length),
20733 "=d" (entry)
20734 : "0" (service),
20735 "1" (0),
20736 - "D" (&bios32_indirect));
20737 + "D" (&bios32_indirect),
20738 + "r"(__PCIBIOS_DS)
20739 + : "memory");
20740 +
20741 + pax_open_kernel();
20742 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20743 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20744 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20745 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20746 + pax_close_kernel();
20747 +
20748 local_irq_restore(flags);
20749
20750 switch (return_code) {
20751 - case 0:
20752 - return address + entry;
20753 - case 0x80: /* Not present */
20754 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20755 - return 0;
20756 - default: /* Shouldn't happen */
20757 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20758 - service, return_code);
20759 + case 0: {
20760 + int cpu;
20761 + unsigned char flags;
20762 +
20763 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20764 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20765 + printk(KERN_WARNING "bios32_service: not valid\n");
20766 return 0;
20767 + }
20768 + address = address + PAGE_OFFSET;
20769 + length += 16UL; /* some BIOSs underreport this... */
20770 + flags = 4;
20771 + if (length >= 64*1024*1024) {
20772 + length >>= PAGE_SHIFT;
20773 + flags |= 8;
20774 + }
20775 +
20776 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20777 + gdt = get_cpu_gdt_table(cpu);
20778 + pack_descriptor(&d, address, length, 0x9b, flags);
20779 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20780 + pack_descriptor(&d, address, length, 0x93, flags);
20781 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20782 + }
20783 + return entry;
20784 + }
20785 + case 0x80: /* Not present */
20786 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20787 + return 0;
20788 + default: /* Shouldn't happen */
20789 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20790 + service, return_code);
20791 + return 0;
20792 }
20793 }
20794
20795 static struct {
20796 unsigned long address;
20797 unsigned short segment;
20798 -} pci_indirect = { 0, __KERNEL_CS };
20799 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20800
20801 -static int pci_bios_present;
20802 +static int pci_bios_present __read_only;
20803
20804 static int __devinit check_pcibios(void)
20805 {
20806 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20807 unsigned long flags, pcibios_entry;
20808
20809 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20810 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20811 + pci_indirect.address = pcibios_entry;
20812
20813 local_irq_save(flags);
20814 - __asm__(
20815 - "lcall *(%%edi); cld\n\t"
20816 + __asm__("movw %w6, %%ds\n\t"
20817 + "lcall *%%ss:(%%edi); cld\n\t"
20818 + "push %%ss\n\t"
20819 + "pop %%ds\n\t"
20820 "jc 1f\n\t"
20821 "xor %%ah, %%ah\n"
20822 "1:"
20823 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20824 "=b" (ebx),
20825 "=c" (ecx)
20826 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20827 - "D" (&pci_indirect)
20828 + "D" (&pci_indirect),
20829 + "r" (__PCIBIOS_DS)
20830 : "memory");
20831 local_irq_restore(flags);
20832
20833 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20834
20835 switch (len) {
20836 case 1:
20837 - __asm__("lcall *(%%esi); cld\n\t"
20838 + __asm__("movw %w6, %%ds\n\t"
20839 + "lcall *%%ss:(%%esi); cld\n\t"
20840 + "push %%ss\n\t"
20841 + "pop %%ds\n\t"
20842 "jc 1f\n\t"
20843 "xor %%ah, %%ah\n"
20844 "1:"
20845 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20846 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20847 "b" (bx),
20848 "D" ((long)reg),
20849 - "S" (&pci_indirect));
20850 + "S" (&pci_indirect),
20851 + "r" (__PCIBIOS_DS));
20852 /*
20853 * Zero-extend the result beyond 8 bits, do not trust the
20854 * BIOS having done it:
20855 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20856 *value &= 0xff;
20857 break;
20858 case 2:
20859 - __asm__("lcall *(%%esi); cld\n\t"
20860 + __asm__("movw %w6, %%ds\n\t"
20861 + "lcall *%%ss:(%%esi); cld\n\t"
20862 + "push %%ss\n\t"
20863 + "pop %%ds\n\t"
20864 "jc 1f\n\t"
20865 "xor %%ah, %%ah\n"
20866 "1:"
20867 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20868 : "1" (PCIBIOS_READ_CONFIG_WORD),
20869 "b" (bx),
20870 "D" ((long)reg),
20871 - "S" (&pci_indirect));
20872 + "S" (&pci_indirect),
20873 + "r" (__PCIBIOS_DS));
20874 /*
20875 * Zero-extend the result beyond 16 bits, do not trust the
20876 * BIOS having done it:
20877 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20878 *value &= 0xffff;
20879 break;
20880 case 4:
20881 - __asm__("lcall *(%%esi); cld\n\t"
20882 + __asm__("movw %w6, %%ds\n\t"
20883 + "lcall *%%ss:(%%esi); cld\n\t"
20884 + "push %%ss\n\t"
20885 + "pop %%ds\n\t"
20886 "jc 1f\n\t"
20887 "xor %%ah, %%ah\n"
20888 "1:"
20889 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20890 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20891 "b" (bx),
20892 "D" ((long)reg),
20893 - "S" (&pci_indirect));
20894 + "S" (&pci_indirect),
20895 + "r" (__PCIBIOS_DS));
20896 break;
20897 }
20898
20899 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20900
20901 switch (len) {
20902 case 1:
20903 - __asm__("lcall *(%%esi); cld\n\t"
20904 + __asm__("movw %w6, %%ds\n\t"
20905 + "lcall *%%ss:(%%esi); cld\n\t"
20906 + "push %%ss\n\t"
20907 + "pop %%ds\n\t"
20908 "jc 1f\n\t"
20909 "xor %%ah, %%ah\n"
20910 "1:"
20911 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20912 "c" (value),
20913 "b" (bx),
20914 "D" ((long)reg),
20915 - "S" (&pci_indirect));
20916 + "S" (&pci_indirect),
20917 + "r" (__PCIBIOS_DS));
20918 break;
20919 case 2:
20920 - __asm__("lcall *(%%esi); cld\n\t"
20921 + __asm__("movw %w6, %%ds\n\t"
20922 + "lcall *%%ss:(%%esi); cld\n\t"
20923 + "push %%ss\n\t"
20924 + "pop %%ds\n\t"
20925 "jc 1f\n\t"
20926 "xor %%ah, %%ah\n"
20927 "1:"
20928 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20929 "c" (value),
20930 "b" (bx),
20931 "D" ((long)reg),
20932 - "S" (&pci_indirect));
20933 + "S" (&pci_indirect),
20934 + "r" (__PCIBIOS_DS));
20935 break;
20936 case 4:
20937 - __asm__("lcall *(%%esi); cld\n\t"
20938 + __asm__("movw %w6, %%ds\n\t"
20939 + "lcall *%%ss:(%%esi); cld\n\t"
20940 + "push %%ss\n\t"
20941 + "pop %%ds\n\t"
20942 "jc 1f\n\t"
20943 "xor %%ah, %%ah\n"
20944 "1:"
20945 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20946 "c" (value),
20947 "b" (bx),
20948 "D" ((long)reg),
20949 - "S" (&pci_indirect));
20950 + "S" (&pci_indirect),
20951 + "r" (__PCIBIOS_DS));
20952 break;
20953 }
20954
20955 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20956
20957 DBG("PCI: Fetching IRQ routing table... ");
20958 __asm__("push %%es\n\t"
20959 + "movw %w8, %%ds\n\t"
20960 "push %%ds\n\t"
20961 "pop %%es\n\t"
20962 - "lcall *(%%esi); cld\n\t"
20963 + "lcall *%%ss:(%%esi); cld\n\t"
20964 "pop %%es\n\t"
20965 + "push %%ss\n\t"
20966 + "pop %%ds\n"
20967 "jc 1f\n\t"
20968 "xor %%ah, %%ah\n"
20969 "1:"
20970 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
20971 "1" (0),
20972 "D" ((long) &opt),
20973 "S" (&pci_indirect),
20974 - "m" (opt)
20975 + "m" (opt),
20976 + "r" (__PCIBIOS_DS)
20977 : "memory");
20978 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
20979 if (ret & 0xff00)
20980 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
20981 {
20982 int ret;
20983
20984 - __asm__("lcall *(%%esi); cld\n\t"
20985 + __asm__("movw %w5, %%ds\n\t"
20986 + "lcall *%%ss:(%%esi); cld\n\t"
20987 + "push %%ss\n\t"
20988 + "pop %%ds\n"
20989 "jc 1f\n\t"
20990 "xor %%ah, %%ah\n"
20991 "1:"
20992 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
20993 : "0" (PCIBIOS_SET_PCI_HW_INT),
20994 "b" ((dev->bus->number << 8) | dev->devfn),
20995 "c" ((irq << 8) | (pin + 10)),
20996 - "S" (&pci_indirect));
20997 + "S" (&pci_indirect),
20998 + "r" (__PCIBIOS_DS));
20999 return !(ret & 0xff00);
21000 }
21001 EXPORT_SYMBOL(pcibios_set_irq_routing);
21002 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_32.c linux-2.6.39.4/arch/x86/platform/efi/efi_32.c
21003 --- linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400
21004 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-08-05 19:44:35.000000000 -0400
21005 @@ -38,70 +38,37 @@
21006 */
21007
21008 static unsigned long efi_rt_eflags;
21009 -static pgd_t efi_bak_pg_dir_pointer[2];
21010 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21011
21012 -void efi_call_phys_prelog(void)
21013 +void __init efi_call_phys_prelog(void)
21014 {
21015 - unsigned long cr4;
21016 - unsigned long temp;
21017 struct desc_ptr gdt_descr;
21018
21019 local_irq_save(efi_rt_eflags);
21020
21021 - /*
21022 - * If I don't have PAE, I should just duplicate two entries in page
21023 - * directory. If I have PAE, I just need to duplicate one entry in
21024 - * page directory.
21025 - */
21026 - cr4 = read_cr4_safe();
21027 -
21028 - if (cr4 & X86_CR4_PAE) {
21029 - efi_bak_pg_dir_pointer[0].pgd =
21030 - swapper_pg_dir[pgd_index(0)].pgd;
21031 - swapper_pg_dir[0].pgd =
21032 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21033 - } else {
21034 - efi_bak_pg_dir_pointer[0].pgd =
21035 - swapper_pg_dir[pgd_index(0)].pgd;
21036 - efi_bak_pg_dir_pointer[1].pgd =
21037 - swapper_pg_dir[pgd_index(0x400000)].pgd;
21038 - swapper_pg_dir[pgd_index(0)].pgd =
21039 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21040 - temp = PAGE_OFFSET + 0x400000;
21041 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21042 - swapper_pg_dir[pgd_index(temp)].pgd;
21043 - }
21044 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21045 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21046 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21047
21048 /*
21049 * After the lock is released, the original page table is restored.
21050 */
21051 __flush_tlb_all();
21052
21053 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
21054 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
21055 gdt_descr.size = GDT_SIZE - 1;
21056 load_gdt(&gdt_descr);
21057 }
21058
21059 -void efi_call_phys_epilog(void)
21060 +void __init efi_call_phys_epilog(void)
21061 {
21062 - unsigned long cr4;
21063 struct desc_ptr gdt_descr;
21064
21065 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21066 + gdt_descr.address = get_cpu_gdt_table(0);
21067 gdt_descr.size = GDT_SIZE - 1;
21068 load_gdt(&gdt_descr);
21069
21070 - cr4 = read_cr4_safe();
21071 -
21072 - if (cr4 & X86_CR4_PAE) {
21073 - swapper_pg_dir[pgd_index(0)].pgd =
21074 - efi_bak_pg_dir_pointer[0].pgd;
21075 - } else {
21076 - swapper_pg_dir[pgd_index(0)].pgd =
21077 - efi_bak_pg_dir_pointer[0].pgd;
21078 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21079 - efi_bak_pg_dir_pointer[1].pgd;
21080 - }
21081 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21082
21083 /*
21084 * After the lock is released, the original page table is restored.
21085 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S
21086 --- linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400
21087 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-05 19:44:35.000000000 -0400
21088 @@ -6,6 +6,7 @@
21089 */
21090
21091 #include <linux/linkage.h>
21092 +#include <linux/init.h>
21093 #include <asm/page_types.h>
21094
21095 /*
21096 @@ -20,7 +21,7 @@
21097 * service functions will comply with gcc calling convention, too.
21098 */
21099
21100 -.text
21101 +__INIT
21102 ENTRY(efi_call_phys)
21103 /*
21104 * 0. The function can only be called in Linux kernel. So CS has been
21105 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
21106 * The mapping of lower virtual memory has been created in prelog and
21107 * epilog.
21108 */
21109 - movl $1f, %edx
21110 - subl $__PAGE_OFFSET, %edx
21111 - jmp *%edx
21112 + jmp 1f-__PAGE_OFFSET
21113 1:
21114
21115 /*
21116 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21117 * parameter 2, ..., param n. To make things easy, we save the return
21118 * address of efi_call_phys in a global variable.
21119 */
21120 - popl %edx
21121 - movl %edx, saved_return_addr
21122 - /* get the function pointer into ECX*/
21123 - popl %ecx
21124 - movl %ecx, efi_rt_function_ptr
21125 - movl $2f, %edx
21126 - subl $__PAGE_OFFSET, %edx
21127 - pushl %edx
21128 + popl (saved_return_addr)
21129 + popl (efi_rt_function_ptr)
21130
21131 /*
21132 * 3. Clear PG bit in %CR0.
21133 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21134 /*
21135 * 5. Call the physical function.
21136 */
21137 - jmp *%ecx
21138 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21139
21140 -2:
21141 /*
21142 * 6. After EFI runtime service returns, control will return to
21143 * following instruction. We'd better readjust stack pointer first.
21144 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21145 movl %cr0, %edx
21146 orl $0x80000000, %edx
21147 movl %edx, %cr0
21148 - jmp 1f
21149 -1:
21150 +
21151 /*
21152 * 8. Now restore the virtual mode from flat mode by
21153 * adding EIP with PAGE_OFFSET.
21154 */
21155 - movl $1f, %edx
21156 - jmp *%edx
21157 + jmp 1f+__PAGE_OFFSET
21158 1:
21159
21160 /*
21161 * 9. Balance the stack. And because EAX contain the return value,
21162 * we'd better not clobber it.
21163 */
21164 - leal efi_rt_function_ptr, %edx
21165 - movl (%edx), %ecx
21166 - pushl %ecx
21167 + pushl (efi_rt_function_ptr)
21168
21169 /*
21170 - * 10. Push the saved return address onto the stack and return.
21171 + * 10. Return to the saved return address.
21172 */
21173 - leal saved_return_addr, %edx
21174 - movl (%edx), %ecx
21175 - pushl %ecx
21176 - ret
21177 + jmpl *(saved_return_addr)
21178 ENDPROC(efi_call_phys)
21179 .previous
21180
21181 -.data
21182 +__INITDATA
21183 saved_return_addr:
21184 .long 0
21185 efi_rt_function_ptr:
21186 diff -urNp linux-2.6.39.4/arch/x86/platform/mrst/mrst.c linux-2.6.39.4/arch/x86/platform/mrst/mrst.c
21187 --- linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-05-19 00:06:34.000000000 -0400
21188 +++ linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-08-05 20:34:06.000000000 -0400
21189 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21190 }
21191
21192 /* Reboot and power off are handled by the SCU on a MID device */
21193 -static void mrst_power_off(void)
21194 +static __noreturn void mrst_power_off(void)
21195 {
21196 intel_scu_ipc_simple_command(0xf1, 1);
21197 + BUG();
21198 }
21199
21200 -static void mrst_reboot(void)
21201 +static __noreturn void mrst_reboot(void)
21202 {
21203 intel_scu_ipc_simple_command(0xf1, 0);
21204 + BUG();
21205 }
21206
21207 /*
21208 diff -urNp linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c
21209 --- linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400
21210 +++ linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-08-05 19:44:35.000000000 -0400
21211 @@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau
21212 cpumask_t mask;
21213 struct reset_args reset_args;
21214
21215 + pax_track_stack();
21216 +
21217 reset_args.sender = sender;
21218
21219 cpus_clear(mask);
21220 diff -urNp linux-2.6.39.4/arch/x86/power/cpu.c linux-2.6.39.4/arch/x86/power/cpu.c
21221 --- linux-2.6.39.4/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400
21222 +++ linux-2.6.39.4/arch/x86/power/cpu.c 2011-08-05 19:44:35.000000000 -0400
21223 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21224 static void fix_processor_context(void)
21225 {
21226 int cpu = smp_processor_id();
21227 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21228 + struct tss_struct *t = init_tss + cpu;
21229
21230 set_tss_desc(cpu, t); /*
21231 * This just modifies memory; should not be
21232 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21233 */
21234
21235 #ifdef CONFIG_X86_64
21236 + pax_open_kernel();
21237 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21238 + pax_close_kernel();
21239
21240 syscall_init(); /* This sets MSR_*STAR and related */
21241 #endif
21242 diff -urNp linux-2.6.39.4/arch/x86/vdso/Makefile linux-2.6.39.4/arch/x86/vdso/Makefile
21243 --- linux-2.6.39.4/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400
21244 +++ linux-2.6.39.4/arch/x86/vdso/Makefile 2011-08-05 19:44:35.000000000 -0400
21245 @@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@
21246 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21247 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21248
21249 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21250 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21251 GCOV_PROFILE := n
21252
21253 #
21254 diff -urNp linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c
21255 --- linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400
21256 +++ linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-08-05 19:44:35.000000000 -0400
21257 @@ -22,24 +22,48 @@
21258 #include <asm/hpet.h>
21259 #include <asm/unistd.h>
21260 #include <asm/io.h>
21261 +#include <asm/fixmap.h>
21262 #include "vextern.h"
21263
21264 #define gtod vdso_vsyscall_gtod_data
21265
21266 +notrace noinline long __vdso_fallback_time(long *t)
21267 +{
21268 + long secs;
21269 + asm volatile("syscall"
21270 + : "=a" (secs)
21271 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
21272 + return secs;
21273 +}
21274 +
21275 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
21276 {
21277 long ret;
21278 asm("syscall" : "=a" (ret) :
21279 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
21280 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
21281 return ret;
21282 }
21283
21284 +notrace static inline cycle_t __vdso_vread_hpet(void)
21285 +{
21286 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
21287 +}
21288 +
21289 +notrace static inline cycle_t __vdso_vread_tsc(void)
21290 +{
21291 + cycle_t ret = (cycle_t)vget_cycles();
21292 +
21293 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
21294 +}
21295 +
21296 notrace static inline long vgetns(void)
21297 {
21298 long v;
21299 - cycles_t (*vread)(void);
21300 - vread = gtod->clock.vread;
21301 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
21302 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
21303 + v = __vdso_vread_tsc();
21304 + else
21305 + v = __vdso_vread_hpet();
21306 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
21307 return (v * gtod->clock.mult) >> gtod->clock.shift;
21308 }
21309
21310 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
21311
21312 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
21313 {
21314 - if (likely(gtod->sysctl_enabled))
21315 + if (likely(gtod->sysctl_enabled &&
21316 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21317 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21318 switch (clock) {
21319 case CLOCK_REALTIME:
21320 if (likely(gtod->clock.vread))
21321 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
21322 int clock_gettime(clockid_t, struct timespec *)
21323 __attribute__((weak, alias("__vdso_clock_gettime")));
21324
21325 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21326 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
21327 {
21328 long ret;
21329 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
21330 + asm("syscall" : "=a" (ret) :
21331 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
21332 + return ret;
21333 +}
21334 +
21335 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21336 +{
21337 + if (likely(gtod->sysctl_enabled &&
21338 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21339 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21340 + {
21341 if (likely(tv != NULL)) {
21342 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
21343 offsetof(struct timespec, tv_nsec) ||
21344 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
21345 }
21346 return 0;
21347 }
21348 - asm("syscall" : "=a" (ret) :
21349 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
21350 - return ret;
21351 + return __vdso_fallback_gettimeofday(tv, tz);
21352 }
21353 int gettimeofday(struct timeval *, struct timezone *)
21354 __attribute__((weak, alias("__vdso_gettimeofday")));
21355 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c
21356 --- linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400
21357 +++ linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-08-05 19:44:35.000000000 -0400
21358 @@ -25,6 +25,7 @@
21359 #include <asm/tlbflush.h>
21360 #include <asm/vdso.h>
21361 #include <asm/proto.h>
21362 +#include <asm/mman.h>
21363
21364 enum {
21365 VDSO_DISABLED = 0,
21366 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21367 void enable_sep_cpu(void)
21368 {
21369 int cpu = get_cpu();
21370 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21371 + struct tss_struct *tss = init_tss + cpu;
21372
21373 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21374 put_cpu();
21375 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21376 gate_vma.vm_start = FIXADDR_USER_START;
21377 gate_vma.vm_end = FIXADDR_USER_END;
21378 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21379 - gate_vma.vm_page_prot = __P101;
21380 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21381 /*
21382 * Make sure the vDSO gets into every core dump.
21383 * Dumping its contents makes post-mortem fully interpretable later
21384 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21385 if (compat)
21386 addr = VDSO_HIGH_BASE;
21387 else {
21388 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21389 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21390 if (IS_ERR_VALUE(addr)) {
21391 ret = addr;
21392 goto up_fail;
21393 }
21394 }
21395
21396 - current->mm->context.vdso = (void *)addr;
21397 + current->mm->context.vdso = addr;
21398
21399 if (compat_uses_vma || !compat) {
21400 /*
21401 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21402 }
21403
21404 current_thread_info()->sysenter_return =
21405 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21406 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21407
21408 up_fail:
21409 if (ret)
21410 - current->mm->context.vdso = NULL;
21411 + current->mm->context.vdso = 0;
21412
21413 up_write(&mm->mmap_sem);
21414
21415 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21416
21417 const char *arch_vma_name(struct vm_area_struct *vma)
21418 {
21419 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21420 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21421 return "[vdso]";
21422 +
21423 +#ifdef CONFIG_PAX_SEGMEXEC
21424 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21425 + return "[vdso]";
21426 +#endif
21427 +
21428 return NULL;
21429 }
21430
21431 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21432 * Check to see if the corresponding task was created in compat vdso
21433 * mode.
21434 */
21435 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21436 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21437 return &gate_vma;
21438 return NULL;
21439 }
21440 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso.lds.S linux-2.6.39.4/arch/x86/vdso/vdso.lds.S
21441 --- linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400
21442 +++ linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-08-05 19:44:35.000000000 -0400
21443 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
21444 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
21445 #include "vextern.h"
21446 #undef VEXTERN
21447 +
21448 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
21449 +VEXTERN(fallback_gettimeofday)
21450 +VEXTERN(fallback_time)
21451 +VEXTERN(getcpu)
21452 +#undef VEXTERN
21453 diff -urNp linux-2.6.39.4/arch/x86/vdso/vextern.h linux-2.6.39.4/arch/x86/vdso/vextern.h
21454 --- linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400
21455 +++ linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-08-05 19:44:35.000000000 -0400
21456 @@ -11,6 +11,5 @@
21457 put into vextern.h and be referenced as a pointer with vdso prefix.
21458 The main kernel later fills in the values. */
21459
21460 -VEXTERN(jiffies)
21461 VEXTERN(vgetcpu_mode)
21462 VEXTERN(vsyscall_gtod_data)
21463 diff -urNp linux-2.6.39.4/arch/x86/vdso/vma.c linux-2.6.39.4/arch/x86/vdso/vma.c
21464 --- linux-2.6.39.4/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400
21465 +++ linux-2.6.39.4/arch/x86/vdso/vma.c 2011-08-05 19:44:35.000000000 -0400
21466 @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
21467 if (!vbase)
21468 goto oom;
21469
21470 - if (memcmp(vbase, "\177ELF", 4)) {
21471 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
21472 printk("VDSO: I'm broken; not ELF\n");
21473 vdso_enabled = 0;
21474 }
21475 @@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l
21476 goto up_fail;
21477 }
21478
21479 - current->mm->context.vdso = (void *)addr;
21480 + current->mm->context.vdso = addr;
21481
21482 ret = install_special_mapping(mm, addr, vdso_size,
21483 VM_READ|VM_EXEC|
21484 @@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
21485 VM_ALWAYSDUMP,
21486 vdso_pages);
21487 if (ret) {
21488 - current->mm->context.vdso = NULL;
21489 + current->mm->context.vdso = 0;
21490 goto up_fail;
21491 }
21492
21493 @@ -134,10 +134,3 @@ up_fail:
21494 up_write(&mm->mmap_sem);
21495 return ret;
21496 }
21497 -
21498 -static __init int vdso_setup(char *s)
21499 -{
21500 - vdso_enabled = simple_strtoul(s, NULL, 0);
21501 - return 0;
21502 -}
21503 -__setup("vdso=", vdso_setup);
21504 diff -urNp linux-2.6.39.4/arch/x86/xen/enlighten.c linux-2.6.39.4/arch/x86/xen/enlighten.c
21505 --- linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400
21506 +++ linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-08-05 19:44:35.000000000 -0400
21507 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21508
21509 struct shared_info xen_dummy_shared_info;
21510
21511 -void *xen_initial_gdt;
21512 -
21513 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21514 __read_mostly int xen_have_vector_callback;
21515 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21516 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21517 #endif
21518 };
21519
21520 -static void xen_reboot(int reason)
21521 +static __noreturn void xen_reboot(int reason)
21522 {
21523 struct sched_shutdown r = { .reason = reason };
21524
21525 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21526 BUG();
21527 }
21528
21529 -static void xen_restart(char *msg)
21530 +static __noreturn void xen_restart(char *msg)
21531 {
21532 xen_reboot(SHUTDOWN_reboot);
21533 }
21534
21535 -static void xen_emergency_restart(void)
21536 +static __noreturn void xen_emergency_restart(void)
21537 {
21538 xen_reboot(SHUTDOWN_reboot);
21539 }
21540
21541 -static void xen_machine_halt(void)
21542 +static __noreturn void xen_machine_halt(void)
21543 {
21544 xen_reboot(SHUTDOWN_poweroff);
21545 }
21546 @@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel(
21547 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21548
21549 /* Work out if we support NX */
21550 - x86_configure_nx();
21551 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21552 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21553 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21554 + unsigned l, h;
21555 +
21556 + __supported_pte_mask |= _PAGE_NX;
21557 + rdmsr(MSR_EFER, l, h);
21558 + l |= EFER_NX;
21559 + wrmsr(MSR_EFER, l, h);
21560 + }
21561 +#endif
21562
21563 xen_setup_features();
21564
21565 @@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel(
21566
21567 machine_ops = xen_machine_ops;
21568
21569 - /*
21570 - * The only reliable way to retain the initial address of the
21571 - * percpu gdt_page is to remember it here, so we can go and
21572 - * mark it RW later, when the initial percpu area is freed.
21573 - */
21574 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21575 -
21576 xen_smp_init();
21577
21578 #ifdef CONFIG_ACPI_NUMA
21579 diff -urNp linux-2.6.39.4/arch/x86/xen/mmu.c linux-2.6.39.4/arch/x86/xen/mmu.c
21580 --- linux-2.6.39.4/arch/x86/xen/mmu.c 2011-07-09 09:18:51.000000000 -0400
21581 +++ linux-2.6.39.4/arch/x86/xen/mmu.c 2011-08-05 19:44:35.000000000 -0400
21582 @@ -1801,6 +1801,8 @@ __init pgd_t *xen_setup_kernel_pagetable
21583 convert_pfn_mfn(init_level4_pgt);
21584 convert_pfn_mfn(level3_ident_pgt);
21585 convert_pfn_mfn(level3_kernel_pgt);
21586 + convert_pfn_mfn(level3_vmalloc_pgt);
21587 + convert_pfn_mfn(level3_vmemmap_pgt);
21588
21589 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21590 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21591 @@ -1819,7 +1821,10 @@ __init pgd_t *xen_setup_kernel_pagetable
21592 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21593 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21594 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21595 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21596 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21597 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21598 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21599 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21600 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21601
21602 diff -urNp linux-2.6.39.4/arch/x86/xen/smp.c linux-2.6.39.4/arch/x86/xen/smp.c
21603 --- linux-2.6.39.4/arch/x86/xen/smp.c 2011-07-09 09:18:51.000000000 -0400
21604 +++ linux-2.6.39.4/arch/x86/xen/smp.c 2011-08-05 19:44:35.000000000 -0400
21605 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
21606 {
21607 BUG_ON(smp_processor_id() != 0);
21608 native_smp_prepare_boot_cpu();
21609 -
21610 - /* We've switched to the "real" per-cpu gdt, so make sure the
21611 - old memory can be recycled */
21612 - make_lowmem_page_readwrite(xen_initial_gdt);
21613 -
21614 xen_filter_cpu_maps();
21615 xen_setup_vcpu_info_placement();
21616 }
21617 @@ -266,12 +261,12 @@ cpu_initialize_context(unsigned int cpu,
21618 gdt = get_cpu_gdt_table(cpu);
21619
21620 ctxt->flags = VGCF_IN_KERNEL;
21621 - ctxt->user_regs.ds = __USER_DS;
21622 - ctxt->user_regs.es = __USER_DS;
21623 + ctxt->user_regs.ds = __KERNEL_DS;
21624 + ctxt->user_regs.es = __KERNEL_DS;
21625 ctxt->user_regs.ss = __KERNEL_DS;
21626 #ifdef CONFIG_X86_32
21627 ctxt->user_regs.fs = __KERNEL_PERCPU;
21628 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21629 + savesegment(gs, ctxt->user_regs.gs);
21630 #else
21631 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21632 #endif
21633 @@ -322,13 +317,12 @@ static int __cpuinit xen_cpu_up(unsigned
21634 int rc;
21635
21636 per_cpu(current_task, cpu) = idle;
21637 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21638 #ifdef CONFIG_X86_32
21639 irq_ctx_init(cpu);
21640 #else
21641 clear_tsk_thread_flag(idle, TIF_FORK);
21642 - per_cpu(kernel_stack, cpu) =
21643 - (unsigned long)task_stack_page(idle) -
21644 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21645 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21646 #endif
21647 xen_setup_runstate_info(cpu);
21648 xen_setup_timer(cpu);
21649 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-asm_32.S linux-2.6.39.4/arch/x86/xen/xen-asm_32.S
21650 --- linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400
21651 +++ linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-08-05 19:44:35.000000000 -0400
21652 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21653 ESP_OFFSET=4 # bytes pushed onto stack
21654
21655 /*
21656 - * Store vcpu_info pointer for easy access. Do it this way to
21657 - * avoid having to reload %fs
21658 + * Store vcpu_info pointer for easy access.
21659 */
21660 #ifdef CONFIG_SMP
21661 - GET_THREAD_INFO(%eax)
21662 - movl TI_cpu(%eax), %eax
21663 - movl __per_cpu_offset(,%eax,4), %eax
21664 - mov xen_vcpu(%eax), %eax
21665 + push %fs
21666 + mov $(__KERNEL_PERCPU), %eax
21667 + mov %eax, %fs
21668 + mov PER_CPU_VAR(xen_vcpu), %eax
21669 + pop %fs
21670 #else
21671 movl xen_vcpu, %eax
21672 #endif
21673 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-head.S linux-2.6.39.4/arch/x86/xen/xen-head.S
21674 --- linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400
21675 +++ linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-08-05 19:44:35.000000000 -0400
21676 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21677 #ifdef CONFIG_X86_32
21678 mov %esi,xen_start_info
21679 mov $init_thread_union+THREAD_SIZE,%esp
21680 +#ifdef CONFIG_SMP
21681 + movl $cpu_gdt_table,%edi
21682 + movl $__per_cpu_load,%eax
21683 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21684 + rorl $16,%eax
21685 + movb %al,__KERNEL_PERCPU + 4(%edi)
21686 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21687 + movl $__per_cpu_end - 1,%eax
21688 + subl $__per_cpu_start,%eax
21689 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21690 +#endif
21691 #else
21692 mov %rsi,xen_start_info
21693 mov $init_thread_union+THREAD_SIZE,%rsp
21694 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-ops.h linux-2.6.39.4/arch/x86/xen/xen-ops.h
21695 --- linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400
21696 +++ linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-08-05 19:44:35.000000000 -0400
21697 @@ -10,8 +10,6 @@
21698 extern const char xen_hypervisor_callback[];
21699 extern const char xen_failsafe_callback[];
21700
21701 -extern void *xen_initial_gdt;
21702 -
21703 struct trap_info;
21704 void xen_copy_trap_info(struct trap_info *traps);
21705
21706 diff -urNp linux-2.6.39.4/block/blk-iopoll.c linux-2.6.39.4/block/blk-iopoll.c
21707 --- linux-2.6.39.4/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400
21708 +++ linux-2.6.39.4/block/blk-iopoll.c 2011-08-05 19:44:35.000000000 -0400
21709 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21710 }
21711 EXPORT_SYMBOL(blk_iopoll_complete);
21712
21713 -static void blk_iopoll_softirq(struct softirq_action *h)
21714 +static void blk_iopoll_softirq(void)
21715 {
21716 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21717 int rearm = 0, budget = blk_iopoll_budget;
21718 diff -urNp linux-2.6.39.4/block/blk-map.c linux-2.6.39.4/block/blk-map.c
21719 --- linux-2.6.39.4/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
21720 +++ linux-2.6.39.4/block/blk-map.c 2011-08-05 19:44:35.000000000 -0400
21721 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21722 if (!len || !kbuf)
21723 return -EINVAL;
21724
21725 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21726 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21727 if (do_copy)
21728 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21729 else
21730 diff -urNp linux-2.6.39.4/block/blk-softirq.c linux-2.6.39.4/block/blk-softirq.c
21731 --- linux-2.6.39.4/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400
21732 +++ linux-2.6.39.4/block/blk-softirq.c 2011-08-05 19:44:35.000000000 -0400
21733 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21734 * Softirq action handler - move entries to local list and loop over them
21735 * while passing them to the queue registered handler.
21736 */
21737 -static void blk_done_softirq(struct softirq_action *h)
21738 +static void blk_done_softirq(void)
21739 {
21740 struct list_head *cpu_list, local_list;
21741
21742 diff -urNp linux-2.6.39.4/block/bsg.c linux-2.6.39.4/block/bsg.c
21743 --- linux-2.6.39.4/block/bsg.c 2011-05-19 00:06:34.000000000 -0400
21744 +++ linux-2.6.39.4/block/bsg.c 2011-08-05 19:44:35.000000000 -0400
21745 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21746 struct sg_io_v4 *hdr, struct bsg_device *bd,
21747 fmode_t has_write_perm)
21748 {
21749 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21750 + unsigned char *cmdptr;
21751 +
21752 if (hdr->request_len > BLK_MAX_CDB) {
21753 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21754 if (!rq->cmd)
21755 return -ENOMEM;
21756 - }
21757 + cmdptr = rq->cmd;
21758 + } else
21759 + cmdptr = tmpcmd;
21760
21761 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21762 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21763 hdr->request_len))
21764 return -EFAULT;
21765
21766 + if (cmdptr != rq->cmd)
21767 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21768 +
21769 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21770 if (blk_verify_command(rq->cmd, has_write_perm))
21771 return -EPERM;
21772 diff -urNp linux-2.6.39.4/block/scsi_ioctl.c linux-2.6.39.4/block/scsi_ioctl.c
21773 --- linux-2.6.39.4/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400
21774 +++ linux-2.6.39.4/block/scsi_ioctl.c 2011-08-05 19:44:35.000000000 -0400
21775 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21776 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21777 struct sg_io_hdr *hdr, fmode_t mode)
21778 {
21779 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21780 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21781 + unsigned char *cmdptr;
21782 +
21783 + if (rq->cmd != rq->__cmd)
21784 + cmdptr = rq->cmd;
21785 + else
21786 + cmdptr = tmpcmd;
21787 +
21788 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21789 return -EFAULT;
21790 +
21791 + if (cmdptr != rq->cmd)
21792 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21793 +
21794 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21795 return -EPERM;
21796
21797 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21798 int err;
21799 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21800 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21801 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21802 + unsigned char *cmdptr;
21803
21804 if (!sic)
21805 return -EINVAL;
21806 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21807 */
21808 err = -EFAULT;
21809 rq->cmd_len = cmdlen;
21810 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21811 +
21812 + if (rq->cmd != rq->__cmd)
21813 + cmdptr = rq->cmd;
21814 + else
21815 + cmdptr = tmpcmd;
21816 +
21817 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21818 goto error;
21819
21820 + if (rq->cmd != cmdptr)
21821 + memcpy(rq->cmd, cmdptr, cmdlen);
21822 +
21823 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21824 goto error;
21825
21826 diff -urNp linux-2.6.39.4/crypto/cryptd.c linux-2.6.39.4/crypto/cryptd.c
21827 --- linux-2.6.39.4/crypto/cryptd.c 2011-05-19 00:06:34.000000000 -0400
21828 +++ linux-2.6.39.4/crypto/cryptd.c 2011-08-05 20:34:06.000000000 -0400
21829 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21830
21831 struct cryptd_blkcipher_request_ctx {
21832 crypto_completion_t complete;
21833 -};
21834 +} __no_const;
21835
21836 struct cryptd_hash_ctx {
21837 struct crypto_shash *child;
21838 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21839
21840 struct cryptd_aead_request_ctx {
21841 crypto_completion_t complete;
21842 -};
21843 +} __no_const;
21844
21845 static void cryptd_queue_worker(struct work_struct *work);
21846
21847 diff -urNp linux-2.6.39.4/crypto/gf128mul.c linux-2.6.39.4/crypto/gf128mul.c
21848 --- linux-2.6.39.4/crypto/gf128mul.c 2011-05-19 00:06:34.000000000 -0400
21849 +++ linux-2.6.39.4/crypto/gf128mul.c 2011-08-05 19:44:35.000000000 -0400
21850 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21851 for (i = 0; i < 7; ++i)
21852 gf128mul_x_lle(&p[i + 1], &p[i]);
21853
21854 - memset(r, 0, sizeof(r));
21855 + memset(r, 0, sizeof(*r));
21856 for (i = 0;;) {
21857 u8 ch = ((u8 *)b)[15 - i];
21858
21859 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21860 for (i = 0; i < 7; ++i)
21861 gf128mul_x_bbe(&p[i + 1], &p[i]);
21862
21863 - memset(r, 0, sizeof(r));
21864 + memset(r, 0, sizeof(*r));
21865 for (i = 0;;) {
21866 u8 ch = ((u8 *)b)[i];
21867
21868 diff -urNp linux-2.6.39.4/crypto/serpent.c linux-2.6.39.4/crypto/serpent.c
21869 --- linux-2.6.39.4/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400
21870 +++ linux-2.6.39.4/crypto/serpent.c 2011-08-05 19:44:35.000000000 -0400
21871 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21872 u32 r0,r1,r2,r3,r4;
21873 int i;
21874
21875 + pax_track_stack();
21876 +
21877 /* Copy key, add padding */
21878
21879 for (i = 0; i < keylen; ++i)
21880 diff -urNp linux-2.6.39.4/Documentation/dontdiff linux-2.6.39.4/Documentation/dontdiff
21881 --- linux-2.6.39.4/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400
21882 +++ linux-2.6.39.4/Documentation/dontdiff 2011-08-05 19:44:35.000000000 -0400
21883 @@ -1,13 +1,16 @@
21884 *.a
21885 *.aux
21886 *.bin
21887 +*.cis
21888 *.cpio
21889 *.csp
21890 +*.dbg
21891 *.dsp
21892 *.dvi
21893 *.elf
21894 *.eps
21895 *.fw
21896 +*.gcno
21897 *.gen.S
21898 *.gif
21899 *.grep
21900 @@ -38,8 +41,10 @@
21901 *.tab.h
21902 *.tex
21903 *.ver
21904 +*.vim
21905 *.xml
21906 *_MODULES
21907 +*_reg_safe.h
21908 *_vga16.c
21909 *~
21910 *.9
21911 @@ -49,11 +54,16 @@
21912 53c700_d.h
21913 CVS
21914 ChangeSet
21915 +GPATH
21916 +GRTAGS
21917 +GSYMS
21918 +GTAGS
21919 Image
21920 Kerntypes
21921 Module.markers
21922 Module.symvers
21923 PENDING
21924 +PERF*
21925 SCCS
21926 System.map*
21927 TAGS
21928 @@ -80,8 +90,11 @@ btfixupprep
21929 build
21930 bvmlinux
21931 bzImage*
21932 +capability_names.h
21933 capflags.c
21934 classlist.h*
21935 +clut_vga16.c
21936 +common-cmds.h
21937 comp*.log
21938 compile.h*
21939 conf
21940 @@ -106,16 +119,19 @@ fore200e_mkfirm
21941 fore200e_pca_fw.c*
21942 gconf
21943 gen-devlist
21944 +gen-kdb_cmds.c
21945 gen_crc32table
21946 gen_init_cpio
21947 generated
21948 genheaders
21949 genksyms
21950 *_gray256.c
21951 +hash
21952 ihex2fw
21953 ikconfig.h*
21954 inat-tables.c
21955 initramfs_data.cpio
21956 +initramfs_data.cpio.bz2
21957 initramfs_data.cpio.gz
21958 initramfs_list
21959 int16.c
21960 @@ -125,7 +141,6 @@ int32.c
21961 int4.c
21962 int8.c
21963 kallsyms
21964 -kconfig
21965 keywords.c
21966 ksym.c*
21967 ksym.h*
21968 @@ -149,7 +164,9 @@ mkboot
21969 mkbugboot
21970 mkcpustr
21971 mkdep
21972 +mkpiggy
21973 mkprep
21974 +mkregtable
21975 mktables
21976 mktree
21977 modpost
21978 @@ -165,6 +182,7 @@ parse.h
21979 patches*
21980 pca200e.bin
21981 pca200e_ecd.bin2
21982 +perf-archive
21983 piggy.gz
21984 piggyback
21985 piggy.S
21986 @@ -180,7 +198,9 @@ r600_reg_safe.h
21987 raid6altivec*.c
21988 raid6int*.c
21989 raid6tables.c
21990 +regdb.c
21991 relocs
21992 +rlim_names.h
21993 rn50_reg_safe.h
21994 rs600_reg_safe.h
21995 rv515_reg_safe.h
21996 @@ -189,6 +209,7 @@ setup
21997 setup.bin
21998 setup.elf
21999 sImage
22000 +slabinfo
22001 sm_tbl*
22002 split-include
22003 syscalltab.h
22004 @@ -213,13 +234,17 @@ version.h*
22005 vmlinux
22006 vmlinux-*
22007 vmlinux.aout
22008 +vmlinux.bin.all
22009 +vmlinux.bin.bz2
22010 vmlinux.lds
22011 +vmlinux.relocs
22012 voffset.h
22013 vsyscall.lds
22014 vsyscall_32.lds
22015 wanxlfw.inc
22016 uImage
22017 unifdef
22018 +utsrelease.h
22019 wakeup.bin
22020 wakeup.elf
22021 wakeup.lds
22022 diff -urNp linux-2.6.39.4/Documentation/kernel-parameters.txt linux-2.6.39.4/Documentation/kernel-parameters.txt
22023 --- linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-06-25 12:55:22.000000000 -0400
22024 +++ linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-08-05 19:44:35.000000000 -0400
22025 @@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes
22026 the specified number of seconds. This is to be used if
22027 your oopses keep scrolling off the screen.
22028
22029 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22030 + virtualization environments that don't cope well with the
22031 + expand down segment used by UDEREF on X86-32 or the frequent
22032 + page table updates on X86-64.
22033 +
22034 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22035 +
22036 pcbit= [HW,ISDN]
22037
22038 pcd. [PARIDE]
22039 diff -urNp linux-2.6.39.4/drivers/acpi/apei/cper.c linux-2.6.39.4/drivers/acpi/apei/cper.c
22040 --- linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400
22041 +++ linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-08-05 19:44:35.000000000 -0400
22042 @@ -38,12 +38,12 @@
22043 */
22044 u64 cper_next_record_id(void)
22045 {
22046 - static atomic64_t seq;
22047 + static atomic64_unchecked_t seq;
22048
22049 - if (!atomic64_read(&seq))
22050 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
22051 + if (!atomic64_read_unchecked(&seq))
22052 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22053
22054 - return atomic64_inc_return(&seq);
22055 + return atomic64_inc_return_unchecked(&seq);
22056 }
22057 EXPORT_SYMBOL_GPL(cper_next_record_id);
22058
22059 diff -urNp linux-2.6.39.4/drivers/acpi/power_meter.c linux-2.6.39.4/drivers/acpi/power_meter.c
22060 --- linux-2.6.39.4/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400
22061 +++ linux-2.6.39.4/drivers/acpi/power_meter.c 2011-08-05 19:44:35.000000000 -0400
22062 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
22063 return res;
22064
22065 temp /= 1000;
22066 - if (temp < 0)
22067 - return -EINVAL;
22068
22069 mutex_lock(&resource->lock);
22070 resource->trip[attr->index - 7] = temp;
22071 diff -urNp linux-2.6.39.4/drivers/acpi/proc.c linux-2.6.39.4/drivers/acpi/proc.c
22072 --- linux-2.6.39.4/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400
22073 +++ linux-2.6.39.4/drivers/acpi/proc.c 2011-08-05 19:44:35.000000000 -0400
22074 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22075 size_t count, loff_t * ppos)
22076 {
22077 struct list_head *node, *next;
22078 - char strbuf[5];
22079 - char str[5] = "";
22080 - unsigned int len = count;
22081 -
22082 - if (len > 4)
22083 - len = 4;
22084 - if (len < 0)
22085 - return -EFAULT;
22086 + char strbuf[5] = {0};
22087
22088 - if (copy_from_user(strbuf, buffer, len))
22089 + if (count > 4)
22090 + count = 4;
22091 + if (copy_from_user(strbuf, buffer, count))
22092 return -EFAULT;
22093 - strbuf[len] = '\0';
22094 - sscanf(strbuf, "%s", str);
22095 + strbuf[count] = '\0';
22096
22097 mutex_lock(&acpi_device_lock);
22098 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22099 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22100 if (!dev->wakeup.flags.valid)
22101 continue;
22102
22103 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
22104 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22105 if (device_can_wakeup(&dev->dev)) {
22106 bool enable = !device_may_wakeup(&dev->dev);
22107 device_set_wakeup_enable(&dev->dev, enable);
22108 diff -urNp linux-2.6.39.4/drivers/acpi/processor_driver.c linux-2.6.39.4/drivers/acpi/processor_driver.c
22109 --- linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400
22110 +++ linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-08-05 19:44:35.000000000 -0400
22111 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22112 return 0;
22113 #endif
22114
22115 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22116 + BUG_ON(pr->id >= nr_cpu_ids);
22117
22118 /*
22119 * Buggy BIOS check
22120 diff -urNp linux-2.6.39.4/drivers/ata/libata-core.c linux-2.6.39.4/drivers/ata/libata-core.c
22121 --- linux-2.6.39.4/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400
22122 +++ linux-2.6.39.4/drivers/ata/libata-core.c 2011-08-05 20:34:06.000000000 -0400
22123 @@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd *
22124 struct ata_port *ap;
22125 unsigned int tag;
22126
22127 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22128 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22129 ap = qc->ap;
22130
22131 qc->flags = 0;
22132 @@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued
22133 struct ata_port *ap;
22134 struct ata_link *link;
22135
22136 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22137 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22138 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22139 ap = qc->ap;
22140 link = qc->dev->link;
22141 @@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct
22142 return;
22143
22144 spin_lock(&lock);
22145 + pax_open_kernel();
22146
22147 for (cur = ops->inherits; cur; cur = cur->inherits) {
22148 void **inherit = (void **)cur;
22149 @@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct
22150 if (IS_ERR(*pp))
22151 *pp = NULL;
22152
22153 - ops->inherits = NULL;
22154 + *(struct ata_port_operations **)&ops->inherits = NULL;
22155
22156 + pax_close_kernel();
22157 spin_unlock(&lock);
22158 }
22159
22160 diff -urNp linux-2.6.39.4/drivers/ata/libata-eh.c linux-2.6.39.4/drivers/ata/libata-eh.c
22161 --- linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:11:51.000000000 -0400
22162 +++ linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:12:20.000000000 -0400
22163 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22164 {
22165 struct ata_link *link;
22166
22167 + pax_track_stack();
22168 +
22169 ata_for_each_link(link, ap, HOST_FIRST)
22170 ata_eh_link_report(link);
22171 }
22172 diff -urNp linux-2.6.39.4/drivers/ata/pata_arasan_cf.c linux-2.6.39.4/drivers/ata/pata_arasan_cf.c
22173 --- linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400
22174 +++ linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-08-05 20:34:06.000000000 -0400
22175 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22176 /* Handle platform specific quirks */
22177 if (pdata->quirk) {
22178 if (pdata->quirk & CF_BROKEN_PIO) {
22179 - ap->ops->set_piomode = NULL;
22180 + pax_open_kernel();
22181 + *(void **)&ap->ops->set_piomode = NULL;
22182 + pax_close_kernel();
22183 ap->pio_mask = 0;
22184 }
22185 if (pdata->quirk & CF_BROKEN_MWDMA)
22186 diff -urNp linux-2.6.39.4/drivers/atm/adummy.c linux-2.6.39.4/drivers/atm/adummy.c
22187 --- linux-2.6.39.4/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400
22188 +++ linux-2.6.39.4/drivers/atm/adummy.c 2011-08-05 19:44:36.000000000 -0400
22189 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22190 vcc->pop(vcc, skb);
22191 else
22192 dev_kfree_skb_any(skb);
22193 - atomic_inc(&vcc->stats->tx);
22194 + atomic_inc_unchecked(&vcc->stats->tx);
22195
22196 return 0;
22197 }
22198 diff -urNp linux-2.6.39.4/drivers/atm/ambassador.c linux-2.6.39.4/drivers/atm/ambassador.c
22199 --- linux-2.6.39.4/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400
22200 +++ linux-2.6.39.4/drivers/atm/ambassador.c 2011-08-05 19:44:36.000000000 -0400
22201 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22202 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22203
22204 // VC layer stats
22205 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22206 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22207
22208 // free the descriptor
22209 kfree (tx_descr);
22210 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22211 dump_skb ("<<<", vc, skb);
22212
22213 // VC layer stats
22214 - atomic_inc(&atm_vcc->stats->rx);
22215 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22216 __net_timestamp(skb);
22217 // end of our responsibility
22218 atm_vcc->push (atm_vcc, skb);
22219 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22220 } else {
22221 PRINTK (KERN_INFO, "dropped over-size frame");
22222 // should we count this?
22223 - atomic_inc(&atm_vcc->stats->rx_drop);
22224 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22225 }
22226
22227 } else {
22228 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22229 }
22230
22231 if (check_area (skb->data, skb->len)) {
22232 - atomic_inc(&atm_vcc->stats->tx_err);
22233 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22234 return -ENOMEM; // ?
22235 }
22236
22237 diff -urNp linux-2.6.39.4/drivers/atm/atmtcp.c linux-2.6.39.4/drivers/atm/atmtcp.c
22238 --- linux-2.6.39.4/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400
22239 +++ linux-2.6.39.4/drivers/atm/atmtcp.c 2011-08-05 19:44:36.000000000 -0400
22240 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22241 if (vcc->pop) vcc->pop(vcc,skb);
22242 else dev_kfree_skb(skb);
22243 if (dev_data) return 0;
22244 - atomic_inc(&vcc->stats->tx_err);
22245 + atomic_inc_unchecked(&vcc->stats->tx_err);
22246 return -ENOLINK;
22247 }
22248 size = skb->len+sizeof(struct atmtcp_hdr);
22249 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22250 if (!new_skb) {
22251 if (vcc->pop) vcc->pop(vcc,skb);
22252 else dev_kfree_skb(skb);
22253 - atomic_inc(&vcc->stats->tx_err);
22254 + atomic_inc_unchecked(&vcc->stats->tx_err);
22255 return -ENOBUFS;
22256 }
22257 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22258 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22259 if (vcc->pop) vcc->pop(vcc,skb);
22260 else dev_kfree_skb(skb);
22261 out_vcc->push(out_vcc,new_skb);
22262 - atomic_inc(&vcc->stats->tx);
22263 - atomic_inc(&out_vcc->stats->rx);
22264 + atomic_inc_unchecked(&vcc->stats->tx);
22265 + atomic_inc_unchecked(&out_vcc->stats->rx);
22266 return 0;
22267 }
22268
22269 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22270 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22271 read_unlock(&vcc_sklist_lock);
22272 if (!out_vcc) {
22273 - atomic_inc(&vcc->stats->tx_err);
22274 + atomic_inc_unchecked(&vcc->stats->tx_err);
22275 goto done;
22276 }
22277 skb_pull(skb,sizeof(struct atmtcp_hdr));
22278 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22279 __net_timestamp(new_skb);
22280 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22281 out_vcc->push(out_vcc,new_skb);
22282 - atomic_inc(&vcc->stats->tx);
22283 - atomic_inc(&out_vcc->stats->rx);
22284 + atomic_inc_unchecked(&vcc->stats->tx);
22285 + atomic_inc_unchecked(&out_vcc->stats->rx);
22286 done:
22287 if (vcc->pop) vcc->pop(vcc,skb);
22288 else dev_kfree_skb(skb);
22289 diff -urNp linux-2.6.39.4/drivers/atm/eni.c linux-2.6.39.4/drivers/atm/eni.c
22290 --- linux-2.6.39.4/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400
22291 +++ linux-2.6.39.4/drivers/atm/eni.c 2011-08-05 19:44:36.000000000 -0400
22292 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22293 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22294 vcc->dev->number);
22295 length = 0;
22296 - atomic_inc(&vcc->stats->rx_err);
22297 + atomic_inc_unchecked(&vcc->stats->rx_err);
22298 }
22299 else {
22300 length = ATM_CELL_SIZE-1; /* no HEC */
22301 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22302 size);
22303 }
22304 eff = length = 0;
22305 - atomic_inc(&vcc->stats->rx_err);
22306 + atomic_inc_unchecked(&vcc->stats->rx_err);
22307 }
22308 else {
22309 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22310 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22311 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22312 vcc->dev->number,vcc->vci,length,size << 2,descr);
22313 length = eff = 0;
22314 - atomic_inc(&vcc->stats->rx_err);
22315 + atomic_inc_unchecked(&vcc->stats->rx_err);
22316 }
22317 }
22318 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22319 @@ -771,7 +771,7 @@ rx_dequeued++;
22320 vcc->push(vcc,skb);
22321 pushed++;
22322 }
22323 - atomic_inc(&vcc->stats->rx);
22324 + atomic_inc_unchecked(&vcc->stats->rx);
22325 }
22326 wake_up(&eni_dev->rx_wait);
22327 }
22328 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22329 PCI_DMA_TODEVICE);
22330 if (vcc->pop) vcc->pop(vcc,skb);
22331 else dev_kfree_skb_irq(skb);
22332 - atomic_inc(&vcc->stats->tx);
22333 + atomic_inc_unchecked(&vcc->stats->tx);
22334 wake_up(&eni_dev->tx_wait);
22335 dma_complete++;
22336 }
22337 diff -urNp linux-2.6.39.4/drivers/atm/firestream.c linux-2.6.39.4/drivers/atm/firestream.c
22338 --- linux-2.6.39.4/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400
22339 +++ linux-2.6.39.4/drivers/atm/firestream.c 2011-08-05 19:44:36.000000000 -0400
22340 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22341 }
22342 }
22343
22344 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22345 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22346
22347 fs_dprintk (FS_DEBUG_TXMEM, "i");
22348 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22349 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22350 #endif
22351 skb_put (skb, qe->p1 & 0xffff);
22352 ATM_SKB(skb)->vcc = atm_vcc;
22353 - atomic_inc(&atm_vcc->stats->rx);
22354 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22355 __net_timestamp(skb);
22356 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22357 atm_vcc->push (atm_vcc, skb);
22358 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22359 kfree (pe);
22360 }
22361 if (atm_vcc)
22362 - atomic_inc(&atm_vcc->stats->rx_drop);
22363 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22364 break;
22365 case 0x1f: /* Reassembly abort: no buffers. */
22366 /* Silently increment error counter. */
22367 if (atm_vcc)
22368 - atomic_inc(&atm_vcc->stats->rx_drop);
22369 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22370 break;
22371 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22372 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22373 diff -urNp linux-2.6.39.4/drivers/atm/fore200e.c linux-2.6.39.4/drivers/atm/fore200e.c
22374 --- linux-2.6.39.4/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400
22375 +++ linux-2.6.39.4/drivers/atm/fore200e.c 2011-08-05 19:44:36.000000000 -0400
22376 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22377 #endif
22378 /* check error condition */
22379 if (*entry->status & STATUS_ERROR)
22380 - atomic_inc(&vcc->stats->tx_err);
22381 + atomic_inc_unchecked(&vcc->stats->tx_err);
22382 else
22383 - atomic_inc(&vcc->stats->tx);
22384 + atomic_inc_unchecked(&vcc->stats->tx);
22385 }
22386 }
22387
22388 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22389 if (skb == NULL) {
22390 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22391
22392 - atomic_inc(&vcc->stats->rx_drop);
22393 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22394 return -ENOMEM;
22395 }
22396
22397 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22398
22399 dev_kfree_skb_any(skb);
22400
22401 - atomic_inc(&vcc->stats->rx_drop);
22402 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22403 return -ENOMEM;
22404 }
22405
22406 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22407
22408 vcc->push(vcc, skb);
22409 - atomic_inc(&vcc->stats->rx);
22410 + atomic_inc_unchecked(&vcc->stats->rx);
22411
22412 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22413
22414 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22415 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22416 fore200e->atm_dev->number,
22417 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22418 - atomic_inc(&vcc->stats->rx_err);
22419 + atomic_inc_unchecked(&vcc->stats->rx_err);
22420 }
22421 }
22422
22423 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22424 goto retry_here;
22425 }
22426
22427 - atomic_inc(&vcc->stats->tx_err);
22428 + atomic_inc_unchecked(&vcc->stats->tx_err);
22429
22430 fore200e->tx_sat++;
22431 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22432 diff -urNp linux-2.6.39.4/drivers/atm/he.c linux-2.6.39.4/drivers/atm/he.c
22433 --- linux-2.6.39.4/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400
22434 +++ linux-2.6.39.4/drivers/atm/he.c 2011-08-05 19:44:36.000000000 -0400
22435 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22436
22437 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22438 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22439 - atomic_inc(&vcc->stats->rx_drop);
22440 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22441 goto return_host_buffers;
22442 }
22443
22444 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22445 RBRQ_LEN_ERR(he_dev->rbrq_head)
22446 ? "LEN_ERR" : "",
22447 vcc->vpi, vcc->vci);
22448 - atomic_inc(&vcc->stats->rx_err);
22449 + atomic_inc_unchecked(&vcc->stats->rx_err);
22450 goto return_host_buffers;
22451 }
22452
22453 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22454 vcc->push(vcc, skb);
22455 spin_lock(&he_dev->global_lock);
22456
22457 - atomic_inc(&vcc->stats->rx);
22458 + atomic_inc_unchecked(&vcc->stats->rx);
22459
22460 return_host_buffers:
22461 ++pdus_assembled;
22462 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22463 tpd->vcc->pop(tpd->vcc, tpd->skb);
22464 else
22465 dev_kfree_skb_any(tpd->skb);
22466 - atomic_inc(&tpd->vcc->stats->tx_err);
22467 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22468 }
22469 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22470 return;
22471 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22472 vcc->pop(vcc, skb);
22473 else
22474 dev_kfree_skb_any(skb);
22475 - atomic_inc(&vcc->stats->tx_err);
22476 + atomic_inc_unchecked(&vcc->stats->tx_err);
22477 return -EINVAL;
22478 }
22479
22480 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22481 vcc->pop(vcc, skb);
22482 else
22483 dev_kfree_skb_any(skb);
22484 - atomic_inc(&vcc->stats->tx_err);
22485 + atomic_inc_unchecked(&vcc->stats->tx_err);
22486 return -EINVAL;
22487 }
22488 #endif
22489 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22490 vcc->pop(vcc, skb);
22491 else
22492 dev_kfree_skb_any(skb);
22493 - atomic_inc(&vcc->stats->tx_err);
22494 + atomic_inc_unchecked(&vcc->stats->tx_err);
22495 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22496 return -ENOMEM;
22497 }
22498 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22499 vcc->pop(vcc, skb);
22500 else
22501 dev_kfree_skb_any(skb);
22502 - atomic_inc(&vcc->stats->tx_err);
22503 + atomic_inc_unchecked(&vcc->stats->tx_err);
22504 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22505 return -ENOMEM;
22506 }
22507 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22508 __enqueue_tpd(he_dev, tpd, cid);
22509 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22510
22511 - atomic_inc(&vcc->stats->tx);
22512 + atomic_inc_unchecked(&vcc->stats->tx);
22513
22514 return 0;
22515 }
22516 diff -urNp linux-2.6.39.4/drivers/atm/horizon.c linux-2.6.39.4/drivers/atm/horizon.c
22517 --- linux-2.6.39.4/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400
22518 +++ linux-2.6.39.4/drivers/atm/horizon.c 2011-08-05 19:44:36.000000000 -0400
22519 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22520 {
22521 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22522 // VC layer stats
22523 - atomic_inc(&vcc->stats->rx);
22524 + atomic_inc_unchecked(&vcc->stats->rx);
22525 __net_timestamp(skb);
22526 // end of our responsibility
22527 vcc->push (vcc, skb);
22528 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22529 dev->tx_iovec = NULL;
22530
22531 // VC layer stats
22532 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22533 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22534
22535 // free the skb
22536 hrz_kfree_skb (skb);
22537 diff -urNp linux-2.6.39.4/drivers/atm/idt77252.c linux-2.6.39.4/drivers/atm/idt77252.c
22538 --- linux-2.6.39.4/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400
22539 +++ linux-2.6.39.4/drivers/atm/idt77252.c 2011-08-05 19:44:36.000000000 -0400
22540 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22541 else
22542 dev_kfree_skb(skb);
22543
22544 - atomic_inc(&vcc->stats->tx);
22545 + atomic_inc_unchecked(&vcc->stats->tx);
22546 }
22547
22548 atomic_dec(&scq->used);
22549 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22550 if ((sb = dev_alloc_skb(64)) == NULL) {
22551 printk("%s: Can't allocate buffers for aal0.\n",
22552 card->name);
22553 - atomic_add(i, &vcc->stats->rx_drop);
22554 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22555 break;
22556 }
22557 if (!atm_charge(vcc, sb->truesize)) {
22558 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22559 card->name);
22560 - atomic_add(i - 1, &vcc->stats->rx_drop);
22561 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22562 dev_kfree_skb(sb);
22563 break;
22564 }
22565 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22566 ATM_SKB(sb)->vcc = vcc;
22567 __net_timestamp(sb);
22568 vcc->push(vcc, sb);
22569 - atomic_inc(&vcc->stats->rx);
22570 + atomic_inc_unchecked(&vcc->stats->rx);
22571
22572 cell += ATM_CELL_PAYLOAD;
22573 }
22574 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22575 "(CDC: %08x)\n",
22576 card->name, len, rpp->len, readl(SAR_REG_CDC));
22577 recycle_rx_pool_skb(card, rpp);
22578 - atomic_inc(&vcc->stats->rx_err);
22579 + atomic_inc_unchecked(&vcc->stats->rx_err);
22580 return;
22581 }
22582 if (stat & SAR_RSQE_CRC) {
22583 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22584 recycle_rx_pool_skb(card, rpp);
22585 - atomic_inc(&vcc->stats->rx_err);
22586 + atomic_inc_unchecked(&vcc->stats->rx_err);
22587 return;
22588 }
22589 if (skb_queue_len(&rpp->queue) > 1) {
22590 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22591 RXPRINTK("%s: Can't alloc RX skb.\n",
22592 card->name);
22593 recycle_rx_pool_skb(card, rpp);
22594 - atomic_inc(&vcc->stats->rx_err);
22595 + atomic_inc_unchecked(&vcc->stats->rx_err);
22596 return;
22597 }
22598 if (!atm_charge(vcc, skb->truesize)) {
22599 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22600 __net_timestamp(skb);
22601
22602 vcc->push(vcc, skb);
22603 - atomic_inc(&vcc->stats->rx);
22604 + atomic_inc_unchecked(&vcc->stats->rx);
22605
22606 return;
22607 }
22608 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22609 __net_timestamp(skb);
22610
22611 vcc->push(vcc, skb);
22612 - atomic_inc(&vcc->stats->rx);
22613 + atomic_inc_unchecked(&vcc->stats->rx);
22614
22615 if (skb->truesize > SAR_FB_SIZE_3)
22616 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22617 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22618 if (vcc->qos.aal != ATM_AAL0) {
22619 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22620 card->name, vpi, vci);
22621 - atomic_inc(&vcc->stats->rx_drop);
22622 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22623 goto drop;
22624 }
22625
22626 if ((sb = dev_alloc_skb(64)) == NULL) {
22627 printk("%s: Can't allocate buffers for AAL0.\n",
22628 card->name);
22629 - atomic_inc(&vcc->stats->rx_err);
22630 + atomic_inc_unchecked(&vcc->stats->rx_err);
22631 goto drop;
22632 }
22633
22634 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22635 ATM_SKB(sb)->vcc = vcc;
22636 __net_timestamp(sb);
22637 vcc->push(vcc, sb);
22638 - atomic_inc(&vcc->stats->rx);
22639 + atomic_inc_unchecked(&vcc->stats->rx);
22640
22641 drop:
22642 skb_pull(queue, 64);
22643 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22644
22645 if (vc == NULL) {
22646 printk("%s: NULL connection in send().\n", card->name);
22647 - atomic_inc(&vcc->stats->tx_err);
22648 + atomic_inc_unchecked(&vcc->stats->tx_err);
22649 dev_kfree_skb(skb);
22650 return -EINVAL;
22651 }
22652 if (!test_bit(VCF_TX, &vc->flags)) {
22653 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22654 - atomic_inc(&vcc->stats->tx_err);
22655 + atomic_inc_unchecked(&vcc->stats->tx_err);
22656 dev_kfree_skb(skb);
22657 return -EINVAL;
22658 }
22659 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22660 break;
22661 default:
22662 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22663 - atomic_inc(&vcc->stats->tx_err);
22664 + atomic_inc_unchecked(&vcc->stats->tx_err);
22665 dev_kfree_skb(skb);
22666 return -EINVAL;
22667 }
22668
22669 if (skb_shinfo(skb)->nr_frags != 0) {
22670 printk("%s: No scatter-gather yet.\n", card->name);
22671 - atomic_inc(&vcc->stats->tx_err);
22672 + atomic_inc_unchecked(&vcc->stats->tx_err);
22673 dev_kfree_skb(skb);
22674 return -EINVAL;
22675 }
22676 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22677
22678 err = queue_skb(card, vc, skb, oam);
22679 if (err) {
22680 - atomic_inc(&vcc->stats->tx_err);
22681 + atomic_inc_unchecked(&vcc->stats->tx_err);
22682 dev_kfree_skb(skb);
22683 return err;
22684 }
22685 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22686 skb = dev_alloc_skb(64);
22687 if (!skb) {
22688 printk("%s: Out of memory in send_oam().\n", card->name);
22689 - atomic_inc(&vcc->stats->tx_err);
22690 + atomic_inc_unchecked(&vcc->stats->tx_err);
22691 return -ENOMEM;
22692 }
22693 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22694 diff -urNp linux-2.6.39.4/drivers/atm/iphase.c linux-2.6.39.4/drivers/atm/iphase.c
22695 --- linux-2.6.39.4/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400
22696 +++ linux-2.6.39.4/drivers/atm/iphase.c 2011-08-05 19:44:36.000000000 -0400
22697 @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22698 status = (u_short) (buf_desc_ptr->desc_mode);
22699 if (status & (RX_CER | RX_PTE | RX_OFL))
22700 {
22701 - atomic_inc(&vcc->stats->rx_err);
22702 + atomic_inc_unchecked(&vcc->stats->rx_err);
22703 IF_ERR(printk("IA: bad packet, dropping it");)
22704 if (status & RX_CER) {
22705 IF_ERR(printk(" cause: packet CRC error\n");)
22706 @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22707 len = dma_addr - buf_addr;
22708 if (len > iadev->rx_buf_sz) {
22709 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22710 - atomic_inc(&vcc->stats->rx_err);
22711 + atomic_inc_unchecked(&vcc->stats->rx_err);
22712 goto out_free_desc;
22713 }
22714
22715 @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
22716 ia_vcc = INPH_IA_VCC(vcc);
22717 if (ia_vcc == NULL)
22718 {
22719 - atomic_inc(&vcc->stats->rx_err);
22720 + atomic_inc_unchecked(&vcc->stats->rx_err);
22721 dev_kfree_skb_any(skb);
22722 atm_return(vcc, atm_guess_pdu2truesize(len));
22723 goto INCR_DLE;
22724 @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
22725 if ((length > iadev->rx_buf_sz) || (length >
22726 (skb->len - sizeof(struct cpcs_trailer))))
22727 {
22728 - atomic_inc(&vcc->stats->rx_err);
22729 + atomic_inc_unchecked(&vcc->stats->rx_err);
22730 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22731 length, skb->len);)
22732 dev_kfree_skb_any(skb);
22733 @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
22734
22735 IF_RX(printk("rx_dle_intr: skb push");)
22736 vcc->push(vcc,skb);
22737 - atomic_inc(&vcc->stats->rx);
22738 + atomic_inc_unchecked(&vcc->stats->rx);
22739 iadev->rx_pkt_cnt++;
22740 }
22741 INCR_DLE:
22742 @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
22743 {
22744 struct k_sonet_stats *stats;
22745 stats = &PRIV(_ia_dev[board])->sonet_stats;
22746 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22747 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22748 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22749 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22750 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22751 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22752 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22753 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22754 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22755 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22756 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22757 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22758 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22759 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22760 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22761 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22762 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22763 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22764 }
22765 ia_cmds.status = 0;
22766 break;
22767 @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22768 if ((desc == 0) || (desc > iadev->num_tx_desc))
22769 {
22770 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22771 - atomic_inc(&vcc->stats->tx);
22772 + atomic_inc_unchecked(&vcc->stats->tx);
22773 if (vcc->pop)
22774 vcc->pop(vcc, skb);
22775 else
22776 @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22777 ATM_DESC(skb) = vcc->vci;
22778 skb_queue_tail(&iadev->tx_dma_q, skb);
22779
22780 - atomic_inc(&vcc->stats->tx);
22781 + atomic_inc_unchecked(&vcc->stats->tx);
22782 iadev->tx_pkt_cnt++;
22783 /* Increment transaction counter */
22784 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22785
22786 #if 0
22787 /* add flow control logic */
22788 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22789 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22790 if (iavcc->vc_desc_cnt > 10) {
22791 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22792 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22793 diff -urNp linux-2.6.39.4/drivers/atm/lanai.c linux-2.6.39.4/drivers/atm/lanai.c
22794 --- linux-2.6.39.4/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400
22795 +++ linux-2.6.39.4/drivers/atm/lanai.c 2011-08-05 19:44:36.000000000 -0400
22796 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22797 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22798 lanai_endtx(lanai, lvcc);
22799 lanai_free_skb(lvcc->tx.atmvcc, skb);
22800 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22801 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22802 }
22803
22804 /* Try to fill the buffer - don't call unless there is backlog */
22805 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22806 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22807 __net_timestamp(skb);
22808 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22809 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22810 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22811 out:
22812 lvcc->rx.buf.ptr = end;
22813 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22814 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22815 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22816 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22817 lanai->stats.service_rxnotaal5++;
22818 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22819 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22820 return 0;
22821 }
22822 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22823 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22824 int bytes;
22825 read_unlock(&vcc_sklist_lock);
22826 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22827 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22828 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22829 lvcc->stats.x.aal5.service_trash++;
22830 bytes = (SERVICE_GET_END(s) * 16) -
22831 (((unsigned long) lvcc->rx.buf.ptr) -
22832 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22833 }
22834 if (s & SERVICE_STREAM) {
22835 read_unlock(&vcc_sklist_lock);
22836 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22837 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22838 lvcc->stats.x.aal5.service_stream++;
22839 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22840 "PDU on VCI %d!\n", lanai->number, vci);
22841 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22842 return 0;
22843 }
22844 DPRINTK("got rx crc error on vci %d\n", vci);
22845 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22846 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22847 lvcc->stats.x.aal5.service_rxcrc++;
22848 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22849 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22850 diff -urNp linux-2.6.39.4/drivers/atm/nicstar.c linux-2.6.39.4/drivers/atm/nicstar.c
22851 --- linux-2.6.39.4/drivers/atm/nicstar.c 2011-05-19 00:06:34.000000000 -0400
22852 +++ linux-2.6.39.4/drivers/atm/nicstar.c 2011-08-05 19:44:36.000000000 -0400
22853 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22854 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22855 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22856 card->index);
22857 - atomic_inc(&vcc->stats->tx_err);
22858 + atomic_inc_unchecked(&vcc->stats->tx_err);
22859 dev_kfree_skb_any(skb);
22860 return -EINVAL;
22861 }
22862 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22863 if (!vc->tx) {
22864 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22865 card->index);
22866 - atomic_inc(&vcc->stats->tx_err);
22867 + atomic_inc_unchecked(&vcc->stats->tx_err);
22868 dev_kfree_skb_any(skb);
22869 return -EINVAL;
22870 }
22871 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22872 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22873 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22874 card->index);
22875 - atomic_inc(&vcc->stats->tx_err);
22876 + atomic_inc_unchecked(&vcc->stats->tx_err);
22877 dev_kfree_skb_any(skb);
22878 return -EINVAL;
22879 }
22880
22881 if (skb_shinfo(skb)->nr_frags != 0) {
22882 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22883 - atomic_inc(&vcc->stats->tx_err);
22884 + atomic_inc_unchecked(&vcc->stats->tx_err);
22885 dev_kfree_skb_any(skb);
22886 return -EINVAL;
22887 }
22888 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22889 }
22890
22891 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22892 - atomic_inc(&vcc->stats->tx_err);
22893 + atomic_inc_unchecked(&vcc->stats->tx_err);
22894 dev_kfree_skb_any(skb);
22895 return -EIO;
22896 }
22897 - atomic_inc(&vcc->stats->tx);
22898 + atomic_inc_unchecked(&vcc->stats->tx);
22899
22900 return 0;
22901 }
22902 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22903 printk
22904 ("nicstar%d: Can't allocate buffers for aal0.\n",
22905 card->index);
22906 - atomic_add(i, &vcc->stats->rx_drop);
22907 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22908 break;
22909 }
22910 if (!atm_charge(vcc, sb->truesize)) {
22911 RXPRINTK
22912 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22913 card->index);
22914 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22915 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22916 dev_kfree_skb_any(sb);
22917 break;
22918 }
22919 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22920 ATM_SKB(sb)->vcc = vcc;
22921 __net_timestamp(sb);
22922 vcc->push(vcc, sb);
22923 - atomic_inc(&vcc->stats->rx);
22924 + atomic_inc_unchecked(&vcc->stats->rx);
22925 cell += ATM_CELL_PAYLOAD;
22926 }
22927
22928 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22929 if (iovb == NULL) {
22930 printk("nicstar%d: Out of iovec buffers.\n",
22931 card->index);
22932 - atomic_inc(&vcc->stats->rx_drop);
22933 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22934 recycle_rx_buf(card, skb);
22935 return;
22936 }
22937 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22938 small or large buffer itself. */
22939 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22940 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22941 - atomic_inc(&vcc->stats->rx_err);
22942 + atomic_inc_unchecked(&vcc->stats->rx_err);
22943 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22944 NS_MAX_IOVECS);
22945 NS_PRV_IOVCNT(iovb) = 0;
22946 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22947 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22948 card->index);
22949 which_list(card, skb);
22950 - atomic_inc(&vcc->stats->rx_err);
22951 + atomic_inc_unchecked(&vcc->stats->rx_err);
22952 recycle_rx_buf(card, skb);
22953 vc->rx_iov = NULL;
22954 recycle_iov_buf(card, iovb);
22955 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22956 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22957 card->index);
22958 which_list(card, skb);
22959 - atomic_inc(&vcc->stats->rx_err);
22960 + atomic_inc_unchecked(&vcc->stats->rx_err);
22961 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22962 NS_PRV_IOVCNT(iovb));
22963 vc->rx_iov = NULL;
22964 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
22965 printk(" - PDU size mismatch.\n");
22966 else
22967 printk(".\n");
22968 - atomic_inc(&vcc->stats->rx_err);
22969 + atomic_inc_unchecked(&vcc->stats->rx_err);
22970 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22971 NS_PRV_IOVCNT(iovb));
22972 vc->rx_iov = NULL;
22973 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
22974 /* skb points to a small buffer */
22975 if (!atm_charge(vcc, skb->truesize)) {
22976 push_rxbufs(card, skb);
22977 - atomic_inc(&vcc->stats->rx_drop);
22978 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22979 } else {
22980 skb_put(skb, len);
22981 dequeue_sm_buf(card, skb);
22982 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
22983 ATM_SKB(skb)->vcc = vcc;
22984 __net_timestamp(skb);
22985 vcc->push(vcc, skb);
22986 - atomic_inc(&vcc->stats->rx);
22987 + atomic_inc_unchecked(&vcc->stats->rx);
22988 }
22989 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
22990 struct sk_buff *sb;
22991 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
22992 if (len <= NS_SMBUFSIZE) {
22993 if (!atm_charge(vcc, sb->truesize)) {
22994 push_rxbufs(card, sb);
22995 - atomic_inc(&vcc->stats->rx_drop);
22996 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22997 } else {
22998 skb_put(sb, len);
22999 dequeue_sm_buf(card, sb);
23000 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23001 ATM_SKB(sb)->vcc = vcc;
23002 __net_timestamp(sb);
23003 vcc->push(vcc, sb);
23004 - atomic_inc(&vcc->stats->rx);
23005 + atomic_inc_unchecked(&vcc->stats->rx);
23006 }
23007
23008 push_rxbufs(card, skb);
23009 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23010
23011 if (!atm_charge(vcc, skb->truesize)) {
23012 push_rxbufs(card, skb);
23013 - atomic_inc(&vcc->stats->rx_drop);
23014 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23015 } else {
23016 dequeue_lg_buf(card, skb);
23017 #ifdef NS_USE_DESTRUCTORS
23018 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23019 ATM_SKB(skb)->vcc = vcc;
23020 __net_timestamp(skb);
23021 vcc->push(vcc, skb);
23022 - atomic_inc(&vcc->stats->rx);
23023 + atomic_inc_unchecked(&vcc->stats->rx);
23024 }
23025
23026 push_rxbufs(card, sb);
23027 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23028 printk
23029 ("nicstar%d: Out of huge buffers.\n",
23030 card->index);
23031 - atomic_inc(&vcc->stats->rx_drop);
23032 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23033 recycle_iovec_rx_bufs(card,
23034 (struct iovec *)
23035 iovb->data,
23036 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23037 card->hbpool.count++;
23038 } else
23039 dev_kfree_skb_any(hb);
23040 - atomic_inc(&vcc->stats->rx_drop);
23041 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23042 } else {
23043 /* Copy the small buffer to the huge buffer */
23044 sb = (struct sk_buff *)iov->iov_base;
23045 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23046 #endif /* NS_USE_DESTRUCTORS */
23047 __net_timestamp(hb);
23048 vcc->push(vcc, hb);
23049 - atomic_inc(&vcc->stats->rx);
23050 + atomic_inc_unchecked(&vcc->stats->rx);
23051 }
23052 }
23053
23054 diff -urNp linux-2.6.39.4/drivers/atm/solos-pci.c linux-2.6.39.4/drivers/atm/solos-pci.c
23055 --- linux-2.6.39.4/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400
23056 +++ linux-2.6.39.4/drivers/atm/solos-pci.c 2011-08-05 19:44:36.000000000 -0400
23057 @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
23058 }
23059 atm_charge(vcc, skb->truesize);
23060 vcc->push(vcc, skb);
23061 - atomic_inc(&vcc->stats->rx);
23062 + atomic_inc_unchecked(&vcc->stats->rx);
23063 break;
23064
23065 case PKT_STATUS:
23066 @@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff *
23067 char msg[500];
23068 char item[10];
23069
23070 + pax_track_stack();
23071 +
23072 len = buf->len;
23073 for (i = 0; i < len; i++){
23074 if(i % 8 == 0)
23075 @@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car
23076 vcc = SKB_CB(oldskb)->vcc;
23077
23078 if (vcc) {
23079 - atomic_inc(&vcc->stats->tx);
23080 + atomic_inc_unchecked(&vcc->stats->tx);
23081 solos_pop(vcc, oldskb);
23082 } else
23083 dev_kfree_skb_irq(oldskb);
23084 diff -urNp linux-2.6.39.4/drivers/atm/suni.c linux-2.6.39.4/drivers/atm/suni.c
23085 --- linux-2.6.39.4/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400
23086 +++ linux-2.6.39.4/drivers/atm/suni.c 2011-08-05 19:44:36.000000000 -0400
23087 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23088
23089
23090 #define ADD_LIMITED(s,v) \
23091 - atomic_add((v),&stats->s); \
23092 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23093 + atomic_add_unchecked((v),&stats->s); \
23094 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23095
23096
23097 static void suni_hz(unsigned long from_timer)
23098 diff -urNp linux-2.6.39.4/drivers/atm/uPD98402.c linux-2.6.39.4/drivers/atm/uPD98402.c
23099 --- linux-2.6.39.4/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400
23100 +++ linux-2.6.39.4/drivers/atm/uPD98402.c 2011-08-05 19:44:36.000000000 -0400
23101 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23102 struct sonet_stats tmp;
23103 int error = 0;
23104
23105 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23106 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23107 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23108 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23109 if (zero && !error) {
23110 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23111
23112
23113 #define ADD_LIMITED(s,v) \
23114 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23115 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23116 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23117 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23118 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23119 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23120
23121
23122 static void stat_event(struct atm_dev *dev)
23123 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23124 if (reason & uPD98402_INT_PFM) stat_event(dev);
23125 if (reason & uPD98402_INT_PCO) {
23126 (void) GET(PCOCR); /* clear interrupt cause */
23127 - atomic_add(GET(HECCT),
23128 + atomic_add_unchecked(GET(HECCT),
23129 &PRIV(dev)->sonet_stats.uncorr_hcs);
23130 }
23131 if ((reason & uPD98402_INT_RFO) &&
23132 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23133 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23134 uPD98402_INT_LOS),PIMR); /* enable them */
23135 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23136 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23137 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23138 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23139 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23140 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23141 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23142 return 0;
23143 }
23144
23145 diff -urNp linux-2.6.39.4/drivers/atm/zatm.c linux-2.6.39.4/drivers/atm/zatm.c
23146 --- linux-2.6.39.4/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400
23147 +++ linux-2.6.39.4/drivers/atm/zatm.c 2011-08-05 19:44:36.000000000 -0400
23148 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23149 }
23150 if (!size) {
23151 dev_kfree_skb_irq(skb);
23152 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23153 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23154 continue;
23155 }
23156 if (!atm_charge(vcc,skb->truesize)) {
23157 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23158 skb->len = size;
23159 ATM_SKB(skb)->vcc = vcc;
23160 vcc->push(vcc,skb);
23161 - atomic_inc(&vcc->stats->rx);
23162 + atomic_inc_unchecked(&vcc->stats->rx);
23163 }
23164 zout(pos & 0xffff,MTA(mbx));
23165 #if 0 /* probably a stupid idea */
23166 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23167 skb_queue_head(&zatm_vcc->backlog,skb);
23168 break;
23169 }
23170 - atomic_inc(&vcc->stats->tx);
23171 + atomic_inc_unchecked(&vcc->stats->tx);
23172 wake_up(&zatm_vcc->tx_wait);
23173 }
23174
23175 diff -urNp linux-2.6.39.4/drivers/base/power/wakeup.c linux-2.6.39.4/drivers/base/power/wakeup.c
23176 --- linux-2.6.39.4/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400
23177 +++ linux-2.6.39.4/drivers/base/power/wakeup.c 2011-08-05 19:44:36.000000000 -0400
23178 @@ -29,14 +29,14 @@ bool events_check_enabled;
23179 * They need to be modified together atomically, so it's better to use one
23180 * atomic variable to hold them both.
23181 */
23182 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23183 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23184
23185 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23186 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23187
23188 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23189 {
23190 - unsigned int comb = atomic_read(&combined_event_count);
23191 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23192
23193 *cnt = (comb >> IN_PROGRESS_BITS);
23194 *inpr = comb & MAX_IN_PROGRESS;
23195 @@ -351,7 +351,7 @@ static void wakeup_source_activate(struc
23196 ws->last_time = ktime_get();
23197
23198 /* Increment the counter of events in progress. */
23199 - atomic_inc(&combined_event_count);
23200 + atomic_inc_unchecked(&combined_event_count);
23201 }
23202
23203 /**
23204 @@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str
23205 * Increment the counter of registered wakeup events and decrement the
23206 * couter of wakeup events in progress simultaneously.
23207 */
23208 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23209 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23210 }
23211
23212 /**
23213 diff -urNp linux-2.6.39.4/drivers/block/cciss.c linux-2.6.39.4/drivers/block/cciss.c
23214 --- linux-2.6.39.4/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400
23215 +++ linux-2.6.39.4/drivers/block/cciss.c 2011-08-05 20:34:06.000000000 -0400
23216 @@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct
23217 int err;
23218 u32 cp;
23219
23220 + memset(&arg64, 0, sizeof(arg64));
23221 +
23222 err = 0;
23223 err |=
23224 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23225 @@ -2933,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
23226 while (!list_empty(&h->reqQ)) {
23227 c = list_entry(h->reqQ.next, CommandList_struct, list);
23228 /* can't do anything if fifo is full */
23229 - if ((h->access.fifo_full(h))) {
23230 + if ((h->access->fifo_full(h))) {
23231 dev_warn(&h->pdev->dev, "fifo full\n");
23232 break;
23233 }
23234 @@ -2943,7 +2945,7 @@ static void start_io(ctlr_info_t *h)
23235 h->Qdepth--;
23236
23237 /* Tell the controller execute command */
23238 - h->access.submit_command(h, c);
23239 + h->access->submit_command(h, c);
23240
23241 /* Put job onto the completed Q */
23242 addQ(&h->cmpQ, c);
23243 @@ -3369,17 +3371,17 @@ startio:
23244
23245 static inline unsigned long get_next_completion(ctlr_info_t *h)
23246 {
23247 - return h->access.command_completed(h);
23248 + return h->access->command_completed(h);
23249 }
23250
23251 static inline int interrupt_pending(ctlr_info_t *h)
23252 {
23253 - return h->access.intr_pending(h);
23254 + return h->access->intr_pending(h);
23255 }
23256
23257 static inline long interrupt_not_for_us(ctlr_info_t *h)
23258 {
23259 - return ((h->access.intr_pending(h) == 0) ||
23260 + return ((h->access->intr_pending(h) == 0) ||
23261 (h->interrupts_enabled == 0));
23262 }
23263
23264 @@ -3412,7 +3414,7 @@ static inline u32 next_command(ctlr_info
23265 u32 a;
23266
23267 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23268 - return h->access.command_completed(h);
23269 + return h->access->command_completed(h);
23270
23271 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23272 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23273 @@ -3910,7 +3912,7 @@ static void __devinit cciss_put_controll
23274 trans_support & CFGTBL_Trans_use_short_tags);
23275
23276 /* Change the access methods to the performant access methods */
23277 - h->access = SA5_performant_access;
23278 + h->access = &SA5_performant_access;
23279 h->transMethod = CFGTBL_Trans_Performant;
23280
23281 return;
23282 @@ -4179,7 +4181,7 @@ static int __devinit cciss_pci_init(ctlr
23283 if (prod_index < 0)
23284 return -ENODEV;
23285 h->product_name = products[prod_index].product_name;
23286 - h->access = *(products[prod_index].access);
23287 + h->access = products[prod_index].access;
23288
23289 if (cciss_board_disabled(h)) {
23290 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23291 @@ -4661,7 +4663,7 @@ static int __devinit cciss_init_one(stru
23292 }
23293
23294 /* make sure the board interrupts are off */
23295 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23296 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23297 if (h->msi_vector || h->msix_vector) {
23298 if (request_irq(h->intr[PERF_MODE_INT],
23299 do_cciss_msix_intr,
23300 @@ -4744,7 +4746,7 @@ static int __devinit cciss_init_one(stru
23301 cciss_scsi_setup(h);
23302
23303 /* Turn the interrupts on so we can service requests */
23304 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23305 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23306
23307 /* Get the firmware version */
23308 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23309 @@ -4828,7 +4830,7 @@ static void cciss_shutdown(struct pci_de
23310 kfree(flush_buf);
23311 if (return_code != IO_OK)
23312 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23313 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23314 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23315 free_irq(h->intr[PERF_MODE_INT], h);
23316 }
23317
23318 diff -urNp linux-2.6.39.4/drivers/block/cciss.h linux-2.6.39.4/drivers/block/cciss.h
23319 --- linux-2.6.39.4/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400
23320 +++ linux-2.6.39.4/drivers/block/cciss.h 2011-08-05 20:34:06.000000000 -0400
23321 @@ -100,7 +100,7 @@ struct ctlr_info
23322 /* information about each logical volume */
23323 drive_info_struct *drv[CISS_MAX_LUN];
23324
23325 - struct access_method access;
23326 + struct access_method *access;
23327
23328 /* queue and queue Info */
23329 struct list_head reqQ;
23330 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.c linux-2.6.39.4/drivers/block/cpqarray.c
23331 --- linux-2.6.39.4/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400
23332 +++ linux-2.6.39.4/drivers/block/cpqarray.c 2011-08-05 20:34:06.000000000 -0400
23333 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23334 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23335 goto Enomem4;
23336 }
23337 - hba[i]->access.set_intr_mask(hba[i], 0);
23338 + hba[i]->access->set_intr_mask(hba[i], 0);
23339 if (request_irq(hba[i]->intr, do_ida_intr,
23340 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23341 {
23342 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23343 add_timer(&hba[i]->timer);
23344
23345 /* Enable IRQ now that spinlock and rate limit timer are set up */
23346 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23347 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23348
23349 for(j=0; j<NWD; j++) {
23350 struct gendisk *disk = ida_gendisk[i][j];
23351 @@ -694,7 +694,7 @@ DBGINFO(
23352 for(i=0; i<NR_PRODUCTS; i++) {
23353 if (board_id == products[i].board_id) {
23354 c->product_name = products[i].product_name;
23355 - c->access = *(products[i].access);
23356 + c->access = products[i].access;
23357 break;
23358 }
23359 }
23360 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23361 hba[ctlr]->intr = intr;
23362 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23363 hba[ctlr]->product_name = products[j].product_name;
23364 - hba[ctlr]->access = *(products[j].access);
23365 + hba[ctlr]->access = products[j].access;
23366 hba[ctlr]->ctlr = ctlr;
23367 hba[ctlr]->board_id = board_id;
23368 hba[ctlr]->pci_dev = NULL; /* not PCI */
23369 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23370 struct scatterlist tmp_sg[SG_MAX];
23371 int i, dir, seg;
23372
23373 + pax_track_stack();
23374 +
23375 queue_next:
23376 creq = blk_peek_request(q);
23377 if (!creq)
23378 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23379
23380 while((c = h->reqQ) != NULL) {
23381 /* Can't do anything if we're busy */
23382 - if (h->access.fifo_full(h) == 0)
23383 + if (h->access->fifo_full(h) == 0)
23384 return;
23385
23386 /* Get the first entry from the request Q */
23387 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23388 h->Qdepth--;
23389
23390 /* Tell the controller to do our bidding */
23391 - h->access.submit_command(h, c);
23392 + h->access->submit_command(h, c);
23393
23394 /* Get onto the completion Q */
23395 addQ(&h->cmpQ, c);
23396 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23397 unsigned long flags;
23398 __u32 a,a1;
23399
23400 - istat = h->access.intr_pending(h);
23401 + istat = h->access->intr_pending(h);
23402 /* Is this interrupt for us? */
23403 if (istat == 0)
23404 return IRQ_NONE;
23405 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23406 */
23407 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23408 if (istat & FIFO_NOT_EMPTY) {
23409 - while((a = h->access.command_completed(h))) {
23410 + while((a = h->access->command_completed(h))) {
23411 a1 = a; a &= ~3;
23412 if ((c = h->cmpQ) == NULL)
23413 {
23414 @@ -1449,11 +1451,11 @@ static int sendcmd(
23415 /*
23416 * Disable interrupt
23417 */
23418 - info_p->access.set_intr_mask(info_p, 0);
23419 + info_p->access->set_intr_mask(info_p, 0);
23420 /* Make sure there is room in the command FIFO */
23421 /* Actually it should be completely empty at this time. */
23422 for (i = 200000; i > 0; i--) {
23423 - temp = info_p->access.fifo_full(info_p);
23424 + temp = info_p->access->fifo_full(info_p);
23425 if (temp != 0) {
23426 break;
23427 }
23428 @@ -1466,7 +1468,7 @@ DBG(
23429 /*
23430 * Send the cmd
23431 */
23432 - info_p->access.submit_command(info_p, c);
23433 + info_p->access->submit_command(info_p, c);
23434 complete = pollcomplete(ctlr);
23435
23436 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23437 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23438 * we check the new geometry. Then turn interrupts back on when
23439 * we're done.
23440 */
23441 - host->access.set_intr_mask(host, 0);
23442 + host->access->set_intr_mask(host, 0);
23443 getgeometry(ctlr);
23444 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23445 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23446
23447 for(i=0; i<NWD; i++) {
23448 struct gendisk *disk = ida_gendisk[ctlr][i];
23449 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23450 /* Wait (up to 2 seconds) for a command to complete */
23451
23452 for (i = 200000; i > 0; i--) {
23453 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23454 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23455 if (done == 0) {
23456 udelay(10); /* a short fixed delay */
23457 } else
23458 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.h linux-2.6.39.4/drivers/block/cpqarray.h
23459 --- linux-2.6.39.4/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400
23460 +++ linux-2.6.39.4/drivers/block/cpqarray.h 2011-08-05 20:34:06.000000000 -0400
23461 @@ -99,7 +99,7 @@ struct ctlr_info {
23462 drv_info_t drv[NWD];
23463 struct proc_dir_entry *proc;
23464
23465 - struct access_method access;
23466 + struct access_method *access;
23467
23468 cmdlist_t *reqQ;
23469 cmdlist_t *cmpQ;
23470 diff -urNp linux-2.6.39.4/drivers/block/DAC960.c linux-2.6.39.4/drivers/block/DAC960.c
23471 --- linux-2.6.39.4/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400
23472 +++ linux-2.6.39.4/drivers/block/DAC960.c 2011-08-05 19:44:36.000000000 -0400
23473 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23474 unsigned long flags;
23475 int Channel, TargetID;
23476
23477 + pax_track_stack();
23478 +
23479 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23480 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23481 sizeof(DAC960_SCSI_Inquiry_T) +
23482 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_int.h linux-2.6.39.4/drivers/block/drbd/drbd_int.h
23483 --- linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400
23484 +++ linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-08-05 19:44:36.000000000 -0400
23485 @@ -736,7 +736,7 @@ struct drbd_request;
23486 struct drbd_epoch {
23487 struct list_head list;
23488 unsigned int barrier_nr;
23489 - atomic_t epoch_size; /* increased on every request added. */
23490 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23491 atomic_t active; /* increased on every req. added, and dec on every finished. */
23492 unsigned long flags;
23493 };
23494 @@ -1108,7 +1108,7 @@ struct drbd_conf {
23495 void *int_dig_in;
23496 void *int_dig_vv;
23497 wait_queue_head_t seq_wait;
23498 - atomic_t packet_seq;
23499 + atomic_unchecked_t packet_seq;
23500 unsigned int peer_seq;
23501 spinlock_t peer_seq_lock;
23502 unsigned int minor;
23503 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_main.c linux-2.6.39.4/drivers/block/drbd/drbd_main.c
23504 --- linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400
23505 +++ linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-08-05 19:44:36.000000000 -0400
23506 @@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co
23507 p.sector = sector;
23508 p.block_id = block_id;
23509 p.blksize = blksize;
23510 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23511 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23512
23513 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23514 return false;
23515 @@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m
23516 p.sector = cpu_to_be64(req->sector);
23517 p.block_id = (unsigned long)req;
23518 p.seq_num = cpu_to_be32(req->seq_num =
23519 - atomic_add_return(1, &mdev->packet_seq));
23520 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23521
23522 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23523
23524 @@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_
23525 atomic_set(&mdev->unacked_cnt, 0);
23526 atomic_set(&mdev->local_cnt, 0);
23527 atomic_set(&mdev->net_cnt, 0);
23528 - atomic_set(&mdev->packet_seq, 0);
23529 + atomic_set_unchecked(&mdev->packet_seq, 0);
23530 atomic_set(&mdev->pp_in_use, 0);
23531 atomic_set(&mdev->pp_in_use_by_net, 0);
23532 atomic_set(&mdev->rs_sect_in, 0);
23533 @@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23534 mdev->receiver.t_state);
23535
23536 /* no need to lock it, I'm the only thread alive */
23537 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23538 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23539 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23540 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23541 mdev->al_writ_cnt =
23542 mdev->bm_writ_cnt =
23543 mdev->read_cnt =
23544 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_nl.c linux-2.6.39.4/drivers/block/drbd/drbd_nl.c
23545 --- linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400
23546 +++ linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-08-05 19:44:36.000000000 -0400
23547 @@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru
23548 module_put(THIS_MODULE);
23549 }
23550
23551 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23552 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23553
23554 static unsigned short *
23555 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23556 @@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf *
23557 cn_reply->id.idx = CN_IDX_DRBD;
23558 cn_reply->id.val = CN_VAL_DRBD;
23559
23560 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23561 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23562 cn_reply->ack = 0; /* not used here. */
23563 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23564 (int)((char *)tl - (char *)reply->tag_list);
23565 @@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23566 cn_reply->id.idx = CN_IDX_DRBD;
23567 cn_reply->id.val = CN_VAL_DRBD;
23568
23569 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23570 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23571 cn_reply->ack = 0; /* not used here. */
23572 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23573 (int)((char *)tl - (char *)reply->tag_list);
23574 @@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23575 cn_reply->id.idx = CN_IDX_DRBD;
23576 cn_reply->id.val = CN_VAL_DRBD;
23577
23578 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23579 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23580 cn_reply->ack = 0; // not used here.
23581 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23582 (int)((char*)tl - (char*)reply->tag_list);
23583 @@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb
23584 cn_reply->id.idx = CN_IDX_DRBD;
23585 cn_reply->id.val = CN_VAL_DRBD;
23586
23587 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23588 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23589 cn_reply->ack = 0; /* not used here. */
23590 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23591 (int)((char *)tl - (char *)reply->tag_list);
23592 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c
23593 --- linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400
23594 +++ linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-08-05 19:44:36.000000000 -0400
23595 @@ -894,7 +894,7 @@ retry:
23596 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23597 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23598
23599 - atomic_set(&mdev->packet_seq, 0);
23600 + atomic_set_unchecked(&mdev->packet_seq, 0);
23601 mdev->peer_seq = 0;
23602
23603 drbd_thread_start(&mdev->asender);
23604 @@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish
23605 do {
23606 next_epoch = NULL;
23607
23608 - epoch_size = atomic_read(&epoch->epoch_size);
23609 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23610
23611 switch (ev & ~EV_CLEANUP) {
23612 case EV_PUT:
23613 @@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish
23614 rv = FE_DESTROYED;
23615 } else {
23616 epoch->flags = 0;
23617 - atomic_set(&epoch->epoch_size, 0);
23618 + atomic_set_unchecked(&epoch->epoch_size, 0);
23619 /* atomic_set(&epoch->active, 0); is already zero */
23620 if (rv == FE_STILL_LIVE)
23621 rv = FE_RECYCLED;
23622 @@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c
23623 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23624 drbd_flush(mdev);
23625
23626 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23627 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23628 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23629 if (epoch)
23630 break;
23631 }
23632
23633 epoch = mdev->current_epoch;
23634 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23635 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23636
23637 D_ASSERT(atomic_read(&epoch->active) == 0);
23638 D_ASSERT(epoch->flags == 0);
23639 @@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c
23640 }
23641
23642 epoch->flags = 0;
23643 - atomic_set(&epoch->epoch_size, 0);
23644 + atomic_set_unchecked(&epoch->epoch_size, 0);
23645 atomic_set(&epoch->active, 0);
23646
23647 spin_lock(&mdev->epoch_lock);
23648 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23649 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23650 list_add(&epoch->list, &mdev->current_epoch->list);
23651 mdev->current_epoch = epoch;
23652 mdev->epochs++;
23653 @@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf
23654 spin_unlock(&mdev->peer_seq_lock);
23655
23656 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23657 - atomic_inc(&mdev->current_epoch->epoch_size);
23658 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23659 return drbd_drain_block(mdev, data_size);
23660 }
23661
23662 @@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf
23663
23664 spin_lock(&mdev->epoch_lock);
23665 e->epoch = mdev->current_epoch;
23666 - atomic_inc(&e->epoch->epoch_size);
23667 + atomic_inc_unchecked(&e->epoch->epoch_size);
23668 atomic_inc(&e->epoch->active);
23669 spin_unlock(&mdev->epoch_lock);
23670
23671 @@ -3905,7 +3905,7 @@ static void drbd_disconnect(struct drbd_
23672 D_ASSERT(list_empty(&mdev->done_ee));
23673
23674 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23675 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23676 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23677 D_ASSERT(list_empty(&mdev->current_epoch->list));
23678 }
23679
23680 diff -urNp linux-2.6.39.4/drivers/block/nbd.c linux-2.6.39.4/drivers/block/nbd.c
23681 --- linux-2.6.39.4/drivers/block/nbd.c 2011-06-25 12:55:22.000000000 -0400
23682 +++ linux-2.6.39.4/drivers/block/nbd.c 2011-08-05 19:44:36.000000000 -0400
23683 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23684 struct kvec iov;
23685 sigset_t blocked, oldset;
23686
23687 + pax_track_stack();
23688 +
23689 if (unlikely(!sock)) {
23690 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23691 lo->disk->disk_name, (send ? "send" : "recv"));
23692 @@ -571,6 +573,8 @@ static void do_nbd_request(struct reques
23693 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23694 unsigned int cmd, unsigned long arg)
23695 {
23696 + pax_track_stack();
23697 +
23698 switch (cmd) {
23699 case NBD_DISCONNECT: {
23700 struct request sreq;
23701 diff -urNp linux-2.6.39.4/drivers/char/agp/frontend.c linux-2.6.39.4/drivers/char/agp/frontend.c
23702 --- linux-2.6.39.4/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400
23703 +++ linux-2.6.39.4/drivers/char/agp/frontend.c 2011-08-05 19:44:36.000000000 -0400
23704 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23705 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23706 return -EFAULT;
23707
23708 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23709 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23710 return -EFAULT;
23711
23712 client = agp_find_client_by_pid(reserve.pid);
23713 diff -urNp linux-2.6.39.4/drivers/char/briq_panel.c linux-2.6.39.4/drivers/char/briq_panel.c
23714 --- linux-2.6.39.4/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400
23715 +++ linux-2.6.39.4/drivers/char/briq_panel.c 2011-08-05 19:44:36.000000000 -0400
23716 @@ -9,6 +9,7 @@
23717 #include <linux/types.h>
23718 #include <linux/errno.h>
23719 #include <linux/tty.h>
23720 +#include <linux/mutex.h>
23721 #include <linux/timer.h>
23722 #include <linux/kernel.h>
23723 #include <linux/wait.h>
23724 @@ -34,6 +35,7 @@ static int vfd_is_open;
23725 static unsigned char vfd[40];
23726 static int vfd_cursor;
23727 static unsigned char ledpb, led;
23728 +static DEFINE_MUTEX(vfd_mutex);
23729
23730 static void update_vfd(void)
23731 {
23732 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23733 if (!vfd_is_open)
23734 return -EBUSY;
23735
23736 + mutex_lock(&vfd_mutex);
23737 for (;;) {
23738 char c;
23739 if (!indx)
23740 break;
23741 - if (get_user(c, buf))
23742 + if (get_user(c, buf)) {
23743 + mutex_unlock(&vfd_mutex);
23744 return -EFAULT;
23745 + }
23746 if (esc) {
23747 set_led(c);
23748 esc = 0;
23749 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23750 buf++;
23751 }
23752 update_vfd();
23753 + mutex_unlock(&vfd_mutex);
23754
23755 return len;
23756 }
23757 diff -urNp linux-2.6.39.4/drivers/char/genrtc.c linux-2.6.39.4/drivers/char/genrtc.c
23758 --- linux-2.6.39.4/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400
23759 +++ linux-2.6.39.4/drivers/char/genrtc.c 2011-08-05 19:44:36.000000000 -0400
23760 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23761 switch (cmd) {
23762
23763 case RTC_PLL_GET:
23764 + memset(&pll, 0, sizeof(pll));
23765 if (get_rtc_pll(&pll))
23766 return -EINVAL;
23767 else
23768 diff -urNp linux-2.6.39.4/drivers/char/hpet.c linux-2.6.39.4/drivers/char/hpet.c
23769 --- linux-2.6.39.4/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400
23770 +++ linux-2.6.39.4/drivers/char/hpet.c 2011-08-05 19:44:36.000000000 -0400
23771 @@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
23772 }
23773
23774 static int
23775 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23776 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23777 struct hpet_info *info)
23778 {
23779 struct hpet_timer __iomem *timer;
23780 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c
23781 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400
23782 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-05 20:34:06.000000000 -0400
23783 @@ -414,7 +414,7 @@ struct ipmi_smi {
23784 struct proc_dir_entry *proc_dir;
23785 char proc_dir_name[10];
23786
23787 - atomic_t stats[IPMI_NUM_STATS];
23788 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23789
23790 /*
23791 * run_to_completion duplicate of smb_info, smi_info
23792 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23793
23794
23795 #define ipmi_inc_stat(intf, stat) \
23796 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23797 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23798 #define ipmi_get_stat(intf, stat) \
23799 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23800 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23801
23802 static int is_lan_addr(struct ipmi_addr *addr)
23803 {
23804 @@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23805 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23806 init_waitqueue_head(&intf->waitq);
23807 for (i = 0; i < IPMI_NUM_STATS; i++)
23808 - atomic_set(&intf->stats[i], 0);
23809 + atomic_set_unchecked(&intf->stats[i], 0);
23810
23811 intf->proc_dir = NULL;
23812
23813 @@ -4196,6 +4196,8 @@ static void send_panic_events(char *str)
23814 struct ipmi_smi_msg smi_msg;
23815 struct ipmi_recv_msg recv_msg;
23816
23817 + pax_track_stack();
23818 +
23819 si = (struct ipmi_system_interface_addr *) &addr;
23820 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23821 si->channel = IPMI_BMC_CHANNEL;
23822 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c
23823 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400
23824 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-05 19:44:36.000000000 -0400
23825 @@ -276,7 +276,7 @@ struct smi_info {
23826 unsigned char slave_addr;
23827
23828 /* Counters and things for the proc filesystem. */
23829 - atomic_t stats[SI_NUM_STATS];
23830 + atomic_unchecked_t stats[SI_NUM_STATS];
23831
23832 struct task_struct *thread;
23833
23834 @@ -285,9 +285,9 @@ struct smi_info {
23835 };
23836
23837 #define smi_inc_stat(smi, stat) \
23838 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23839 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23840 #define smi_get_stat(smi, stat) \
23841 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23842 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23843
23844 #define SI_MAX_PARMS 4
23845
23846 @@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info
23847 atomic_set(&new_smi->req_events, 0);
23848 new_smi->run_to_completion = 0;
23849 for (i = 0; i < SI_NUM_STATS; i++)
23850 - atomic_set(&new_smi->stats[i], 0);
23851 + atomic_set_unchecked(&new_smi->stats[i], 0);
23852
23853 new_smi->interrupt_disabled = 1;
23854 atomic_set(&new_smi->stop_operation, 0);
23855 diff -urNp linux-2.6.39.4/drivers/char/Kconfig linux-2.6.39.4/drivers/char/Kconfig
23856 --- linux-2.6.39.4/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400
23857 +++ linux-2.6.39.4/drivers/char/Kconfig 2011-08-05 19:44:36.000000000 -0400
23858 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23859
23860 config DEVKMEM
23861 bool "/dev/kmem virtual device support"
23862 - default y
23863 + default n
23864 + depends on !GRKERNSEC_KMEM
23865 help
23866 Say Y here if you want to support the /dev/kmem device. The
23867 /dev/kmem device is rarely used, but can be used for certain
23868 @@ -596,6 +597,7 @@ config DEVPORT
23869 bool
23870 depends on !M68K
23871 depends on ISA || PCI
23872 + depends on !GRKERNSEC_KMEM
23873 default y
23874
23875 source "drivers/s390/char/Kconfig"
23876 diff -urNp linux-2.6.39.4/drivers/char/mem.c linux-2.6.39.4/drivers/char/mem.c
23877 --- linux-2.6.39.4/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400
23878 +++ linux-2.6.39.4/drivers/char/mem.c 2011-08-05 19:44:36.000000000 -0400
23879 @@ -18,6 +18,7 @@
23880 #include <linux/raw.h>
23881 #include <linux/tty.h>
23882 #include <linux/capability.h>
23883 +#include <linux/security.h>
23884 #include <linux/ptrace.h>
23885 #include <linux/device.h>
23886 #include <linux/highmem.h>
23887 @@ -34,6 +35,10 @@
23888 # include <linux/efi.h>
23889 #endif
23890
23891 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23892 +extern struct file_operations grsec_fops;
23893 +#endif
23894 +
23895 static inline unsigned long size_inside_page(unsigned long start,
23896 unsigned long size)
23897 {
23898 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23899
23900 while (cursor < to) {
23901 if (!devmem_is_allowed(pfn)) {
23902 +#ifdef CONFIG_GRKERNSEC_KMEM
23903 + gr_handle_mem_readwrite(from, to);
23904 +#else
23905 printk(KERN_INFO
23906 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23907 current->comm, from, to);
23908 +#endif
23909 return 0;
23910 }
23911 cursor += PAGE_SIZE;
23912 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23913 }
23914 return 1;
23915 }
23916 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23917 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23918 +{
23919 + return 0;
23920 +}
23921 #else
23922 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23923 {
23924 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23925
23926 while (count > 0) {
23927 unsigned long remaining;
23928 + char *temp;
23929
23930 sz = size_inside_page(p, count);
23931
23932 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23933 if (!ptr)
23934 return -EFAULT;
23935
23936 - remaining = copy_to_user(buf, ptr, sz);
23937 +#ifdef CONFIG_PAX_USERCOPY
23938 + temp = kmalloc(sz, GFP_KERNEL);
23939 + if (!temp) {
23940 + unxlate_dev_mem_ptr(p, ptr);
23941 + return -ENOMEM;
23942 + }
23943 + memcpy(temp, ptr, sz);
23944 +#else
23945 + temp = ptr;
23946 +#endif
23947 +
23948 + remaining = copy_to_user(buf, temp, sz);
23949 +
23950 +#ifdef CONFIG_PAX_USERCOPY
23951 + kfree(temp);
23952 +#endif
23953 +
23954 unxlate_dev_mem_ptr(p, ptr);
23955 if (remaining)
23956 return -EFAULT;
23957 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23958 size_t count, loff_t *ppos)
23959 {
23960 unsigned long p = *ppos;
23961 - ssize_t low_count, read, sz;
23962 + ssize_t low_count, read, sz, err = 0;
23963 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23964 - int err = 0;
23965
23966 read = 0;
23967 if (p < (unsigned long) high_memory) {
23968 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
23969 }
23970 #endif
23971 while (low_count > 0) {
23972 + char *temp;
23973 +
23974 sz = size_inside_page(p, low_count);
23975
23976 /*
23977 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
23978 */
23979 kbuf = xlate_dev_kmem_ptr((char *)p);
23980
23981 - if (copy_to_user(buf, kbuf, sz))
23982 +#ifdef CONFIG_PAX_USERCOPY
23983 + temp = kmalloc(sz, GFP_KERNEL);
23984 + if (!temp)
23985 + return -ENOMEM;
23986 + memcpy(temp, kbuf, sz);
23987 +#else
23988 + temp = kbuf;
23989 +#endif
23990 +
23991 + err = copy_to_user(buf, temp, sz);
23992 +
23993 +#ifdef CONFIG_PAX_USERCOPY
23994 + kfree(temp);
23995 +#endif
23996 +
23997 + if (err)
23998 return -EFAULT;
23999 buf += sz;
24000 p += sz;
24001 @@ -854,6 +901,9 @@ static const struct memdev {
24002 #ifdef CONFIG_CRASH_DUMP
24003 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24004 #endif
24005 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24006 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24007 +#endif
24008 };
24009
24010 static int memory_open(struct inode *inode, struct file *filp)
24011 diff -urNp linux-2.6.39.4/drivers/char/nvram.c linux-2.6.39.4/drivers/char/nvram.c
24012 --- linux-2.6.39.4/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400
24013 +++ linux-2.6.39.4/drivers/char/nvram.c 2011-08-05 19:44:36.000000000 -0400
24014 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24015
24016 spin_unlock_irq(&rtc_lock);
24017
24018 - if (copy_to_user(buf, contents, tmp - contents))
24019 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24020 return -EFAULT;
24021
24022 *ppos = i;
24023 diff -urNp linux-2.6.39.4/drivers/char/random.c linux-2.6.39.4/drivers/char/random.c
24024 --- linux-2.6.39.4/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400
24025 +++ linux-2.6.39.4/drivers/char/random.c 2011-08-05 19:44:36.000000000 -0400
24026 @@ -261,8 +261,13 @@
24027 /*
24028 * Configuration information
24029 */
24030 +#ifdef CONFIG_GRKERNSEC_RANDNET
24031 +#define INPUT_POOL_WORDS 512
24032 +#define OUTPUT_POOL_WORDS 128
24033 +#else
24034 #define INPUT_POOL_WORDS 128
24035 #define OUTPUT_POOL_WORDS 32
24036 +#endif
24037 #define SEC_XFER_SIZE 512
24038 #define EXTRACT_SIZE 10
24039
24040 @@ -300,10 +305,17 @@ static struct poolinfo {
24041 int poolwords;
24042 int tap1, tap2, tap3, tap4, tap5;
24043 } poolinfo_table[] = {
24044 +#ifdef CONFIG_GRKERNSEC_RANDNET
24045 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24046 + { 512, 411, 308, 208, 104, 1 },
24047 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24048 + { 128, 103, 76, 51, 25, 1 },
24049 +#else
24050 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24051 { 128, 103, 76, 51, 25, 1 },
24052 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24053 { 32, 26, 20, 14, 7, 1 },
24054 +#endif
24055 #if 0
24056 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24057 { 2048, 1638, 1231, 819, 411, 1 },
24058 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24059
24060 extract_buf(r, tmp);
24061 i = min_t(int, nbytes, EXTRACT_SIZE);
24062 - if (copy_to_user(buf, tmp, i)) {
24063 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24064 ret = -EFAULT;
24065 break;
24066 }
24067 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24068 #include <linux/sysctl.h>
24069
24070 static int min_read_thresh = 8, min_write_thresh;
24071 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
24072 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24073 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24074 static char sysctl_bootid[16];
24075
24076 diff -urNp linux-2.6.39.4/drivers/char/sonypi.c linux-2.6.39.4/drivers/char/sonypi.c
24077 --- linux-2.6.39.4/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400
24078 +++ linux-2.6.39.4/drivers/char/sonypi.c 2011-08-05 19:44:36.000000000 -0400
24079 @@ -55,6 +55,7 @@
24080 #include <asm/uaccess.h>
24081 #include <asm/io.h>
24082 #include <asm/system.h>
24083 +#include <asm/local.h>
24084
24085 #include <linux/sonypi.h>
24086
24087 @@ -491,7 +492,7 @@ static struct sonypi_device {
24088 spinlock_t fifo_lock;
24089 wait_queue_head_t fifo_proc_list;
24090 struct fasync_struct *fifo_async;
24091 - int open_count;
24092 + local_t open_count;
24093 int model;
24094 struct input_dev *input_jog_dev;
24095 struct input_dev *input_key_dev;
24096 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24097 static int sonypi_misc_release(struct inode *inode, struct file *file)
24098 {
24099 mutex_lock(&sonypi_device.lock);
24100 - sonypi_device.open_count--;
24101 + local_dec(&sonypi_device.open_count);
24102 mutex_unlock(&sonypi_device.lock);
24103 return 0;
24104 }
24105 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24106 {
24107 mutex_lock(&sonypi_device.lock);
24108 /* Flush input queue on first open */
24109 - if (!sonypi_device.open_count)
24110 + if (!local_read(&sonypi_device.open_count))
24111 kfifo_reset(&sonypi_device.fifo);
24112 - sonypi_device.open_count++;
24113 + local_inc(&sonypi_device.open_count);
24114 mutex_unlock(&sonypi_device.lock);
24115
24116 return 0;
24117 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm_bios.c linux-2.6.39.4/drivers/char/tpm/tpm_bios.c
24118 --- linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400
24119 +++ linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-08-05 19:44:36.000000000 -0400
24120 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24121 event = addr;
24122
24123 if ((event->event_type == 0 && event->event_size == 0) ||
24124 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24125 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24126 return NULL;
24127
24128 return addr;
24129 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24130 return NULL;
24131
24132 if ((event->event_type == 0 && event->event_size == 0) ||
24133 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24134 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24135 return NULL;
24136
24137 (*pos)++;
24138 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24139 int i;
24140
24141 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24142 - seq_putc(m, data[i]);
24143 + if (!seq_putc(m, data[i]))
24144 + return -EFAULT;
24145
24146 return 0;
24147 }
24148 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24149 log->bios_event_log_end = log->bios_event_log + len;
24150
24151 virt = acpi_os_map_memory(start, len);
24152 + if (!virt) {
24153 + kfree(log->bios_event_log);
24154 + log->bios_event_log = NULL;
24155 + return -EFAULT;
24156 + }
24157
24158 memcpy(log->bios_event_log, virt, len);
24159
24160 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm.c linux-2.6.39.4/drivers/char/tpm/tpm.c
24161 --- linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400
24162 +++ linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-08-05 19:44:36.000000000 -0400
24163 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24164 chip->vendor.req_complete_val)
24165 goto out_recv;
24166
24167 - if ((status == chip->vendor.req_canceled)) {
24168 + if (status == chip->vendor.req_canceled) {
24169 dev_err(chip->dev, "Operation Canceled\n");
24170 rc = -ECANCELED;
24171 goto out;
24172 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24173
24174 struct tpm_chip *chip = dev_get_drvdata(dev);
24175
24176 + pax_track_stack();
24177 +
24178 tpm_cmd.header.in = tpm_readpubek_header;
24179 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24180 "attempting to read the PUBEK");
24181 diff -urNp linux-2.6.39.4/drivers/crypto/hifn_795x.c linux-2.6.39.4/drivers/crypto/hifn_795x.c
24182 --- linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400
24183 +++ linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-08-05 19:44:36.000000000 -0400
24184 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24185 0xCA, 0x34, 0x2B, 0x2E};
24186 struct scatterlist sg;
24187
24188 + pax_track_stack();
24189 +
24190 memset(src, 0, sizeof(src));
24191 memset(ctx.key, 0, sizeof(ctx.key));
24192
24193 diff -urNp linux-2.6.39.4/drivers/crypto/padlock-aes.c linux-2.6.39.4/drivers/crypto/padlock-aes.c
24194 --- linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400
24195 +++ linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-08-05 19:44:36.000000000 -0400
24196 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24197 struct crypto_aes_ctx gen_aes;
24198 int cpu;
24199
24200 + pax_track_stack();
24201 +
24202 if (key_len % 8) {
24203 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24204 return -EINVAL;
24205 diff -urNp linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c
24206 --- linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400
24207 +++ linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-08-05 19:44:36.000000000 -0400
24208 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24209 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24210 static int edac_pci_poll_msec = 1000; /* one second workq period */
24211
24212 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24213 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24214 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24215 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24216
24217 static struct kobject *edac_pci_top_main_kobj;
24218 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24219 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24220 edac_printk(KERN_CRIT, EDAC_PCI,
24221 "Signaled System Error on %s\n",
24222 pci_name(dev));
24223 - atomic_inc(&pci_nonparity_count);
24224 + atomic_inc_unchecked(&pci_nonparity_count);
24225 }
24226
24227 if (status & (PCI_STATUS_PARITY)) {
24228 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24229 "Master Data Parity Error on %s\n",
24230 pci_name(dev));
24231
24232 - atomic_inc(&pci_parity_count);
24233 + atomic_inc_unchecked(&pci_parity_count);
24234 }
24235
24236 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24237 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24238 "Detected Parity Error on %s\n",
24239 pci_name(dev));
24240
24241 - atomic_inc(&pci_parity_count);
24242 + atomic_inc_unchecked(&pci_parity_count);
24243 }
24244 }
24245
24246 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24247 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24248 "Signaled System Error on %s\n",
24249 pci_name(dev));
24250 - atomic_inc(&pci_nonparity_count);
24251 + atomic_inc_unchecked(&pci_nonparity_count);
24252 }
24253
24254 if (status & (PCI_STATUS_PARITY)) {
24255 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24256 "Master Data Parity Error on "
24257 "%s\n", pci_name(dev));
24258
24259 - atomic_inc(&pci_parity_count);
24260 + atomic_inc_unchecked(&pci_parity_count);
24261 }
24262
24263 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24264 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24265 "Detected Parity Error on %s\n",
24266 pci_name(dev));
24267
24268 - atomic_inc(&pci_parity_count);
24269 + atomic_inc_unchecked(&pci_parity_count);
24270 }
24271 }
24272 }
24273 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24274 if (!check_pci_errors)
24275 return;
24276
24277 - before_count = atomic_read(&pci_parity_count);
24278 + before_count = atomic_read_unchecked(&pci_parity_count);
24279
24280 /* scan all PCI devices looking for a Parity Error on devices and
24281 * bridges.
24282 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24283 /* Only if operator has selected panic on PCI Error */
24284 if (edac_pci_get_panic_on_pe()) {
24285 /* If the count is different 'after' from 'before' */
24286 - if (before_count != atomic_read(&pci_parity_count))
24287 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24288 panic("EDAC: PCI Parity Error");
24289 }
24290 }
24291 diff -urNp linux-2.6.39.4/drivers/edac/i7core_edac.c linux-2.6.39.4/drivers/edac/i7core_edac.c
24292 --- linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-05-19 00:06:34.000000000 -0400
24293 +++ linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-08-05 19:44:36.000000000 -0400
24294 @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24295 char *type, *optype, *err, *msg;
24296 unsigned long error = m->status & 0x1ff0000l;
24297 u32 optypenum = (m->status >> 4) & 0x07;
24298 - u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24299 + u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24300 u32 dimm = (m->misc >> 16) & 0x3;
24301 u32 channel = (m->misc >> 18) & 0x3;
24302 u32 syndrome = m->misc >> 32;
24303 diff -urNp linux-2.6.39.4/drivers/edac/mce_amd.h linux-2.6.39.4/drivers/edac/mce_amd.h
24304 --- linux-2.6.39.4/drivers/edac/mce_amd.h 2011-05-19 00:06:34.000000000 -0400
24305 +++ linux-2.6.39.4/drivers/edac/mce_amd.h 2011-08-05 20:34:06.000000000 -0400
24306 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24307 bool (*dc_mce)(u16, u8);
24308 bool (*ic_mce)(u16, u8);
24309 bool (*nb_mce)(u16, u8);
24310 -};
24311 +} __no_const;
24312
24313 void amd_report_gart_errors(bool);
24314 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24315 diff -urNp linux-2.6.39.4/drivers/firewire/core-card.c linux-2.6.39.4/drivers/firewire/core-card.c
24316 --- linux-2.6.39.4/drivers/firewire/core-card.c 2011-05-19 00:06:34.000000000 -0400
24317 +++ linux-2.6.39.4/drivers/firewire/core-card.c 2011-08-05 20:34:06.000000000 -0400
24318 @@ -652,7 +652,7 @@ void fw_card_release(struct kref *kref)
24319
24320 void fw_core_remove_card(struct fw_card *card)
24321 {
24322 - struct fw_card_driver dummy_driver = dummy_driver_template;
24323 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24324
24325 card->driver->update_phy_reg(card, 4,
24326 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24327 diff -urNp linux-2.6.39.4/drivers/firewire/core-cdev.c linux-2.6.39.4/drivers/firewire/core-cdev.c
24328 --- linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400
24329 +++ linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-08-05 19:44:36.000000000 -0400
24330 @@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie
24331 int ret;
24332
24333 if ((request->channels == 0 && request->bandwidth == 0) ||
24334 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24335 - request->bandwidth < 0)
24336 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24337 return -EINVAL;
24338
24339 r = kmalloc(sizeof(*r), GFP_KERNEL);
24340 diff -urNp linux-2.6.39.4/drivers/firewire/core.h linux-2.6.39.4/drivers/firewire/core.h
24341 --- linux-2.6.39.4/drivers/firewire/core.h 2011-05-19 00:06:34.000000000 -0400
24342 +++ linux-2.6.39.4/drivers/firewire/core.h 2011-08-05 20:34:06.000000000 -0400
24343 @@ -99,6 +99,7 @@ struct fw_card_driver {
24344
24345 int (*stop_iso)(struct fw_iso_context *ctx);
24346 };
24347 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24348
24349 void fw_card_initialize(struct fw_card *card,
24350 const struct fw_card_driver *driver, struct device *device);
24351 diff -urNp linux-2.6.39.4/drivers/firewire/core-transaction.c linux-2.6.39.4/drivers/firewire/core-transaction.c
24352 --- linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400
24353 +++ linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-08-05 19:44:36.000000000 -0400
24354 @@ -36,6 +36,7 @@
24355 #include <linux/string.h>
24356 #include <linux/timer.h>
24357 #include <linux/types.h>
24358 +#include <linux/sched.h>
24359
24360 #include <asm/byteorder.h>
24361
24362 @@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c
24363 struct transaction_callback_data d;
24364 struct fw_transaction t;
24365
24366 + pax_track_stack();
24367 +
24368 init_timer_on_stack(&t.split_timeout_timer);
24369 init_completion(&d.done);
24370 d.payload = payload;
24371 diff -urNp linux-2.6.39.4/drivers/firmware/dmi_scan.c linux-2.6.39.4/drivers/firmware/dmi_scan.c
24372 --- linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400
24373 +++ linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-08-05 19:44:36.000000000 -0400
24374 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24375 }
24376 }
24377 else {
24378 - /*
24379 - * no iounmap() for that ioremap(); it would be a no-op, but
24380 - * it's so early in setup that sucker gets confused into doing
24381 - * what it shouldn't if we actually call it.
24382 - */
24383 p = dmi_ioremap(0xF0000, 0x10000);
24384 if (p == NULL)
24385 goto error;
24386 diff -urNp linux-2.6.39.4/drivers/gpio/vr41xx_giu.c linux-2.6.39.4/drivers/gpio/vr41xx_giu.c
24387 --- linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400
24388 +++ linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-08-05 19:44:36.000000000 -0400
24389 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24390 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24391 maskl, pendl, maskh, pendh);
24392
24393 - atomic_inc(&irq_err_count);
24394 + atomic_inc_unchecked(&irq_err_count);
24395
24396 return -EINVAL;
24397 }
24398 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c
24399 --- linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400
24400 +++ linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-05 19:44:36.000000000 -0400
24401 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24402 struct drm_crtc *tmp;
24403 int crtc_mask = 1;
24404
24405 - WARN(!crtc, "checking null crtc?\n");
24406 + BUG_ON(!crtc);
24407
24408 dev = crtc->dev;
24409
24410 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24411 struct drm_encoder *encoder;
24412 bool ret = true;
24413
24414 + pax_track_stack();
24415 +
24416 crtc->enabled = drm_helper_crtc_in_use(crtc);
24417 if (!crtc->enabled)
24418 return true;
24419 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_drv.c linux-2.6.39.4/drivers/gpu/drm/drm_drv.c
24420 --- linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400
24421 +++ linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-08-05 19:44:36.000000000 -0400
24422 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24423
24424 dev = file_priv->minor->dev;
24425 atomic_inc(&dev->ioctl_count);
24426 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24427 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24428 ++file_priv->ioctl_count;
24429
24430 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24431 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_fops.c linux-2.6.39.4/drivers/gpu/drm/drm_fops.c
24432 --- linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400
24433 +++ linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-08-05 19:44:36.000000000 -0400
24434 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24435 }
24436
24437 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24438 - atomic_set(&dev->counts[i], 0);
24439 + atomic_set_unchecked(&dev->counts[i], 0);
24440
24441 dev->sigdata.lock = NULL;
24442
24443 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24444
24445 retcode = drm_open_helper(inode, filp, dev);
24446 if (!retcode) {
24447 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24448 - if (!dev->open_count++)
24449 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24450 + if (local_inc_return(&dev->open_count) == 1)
24451 retcode = drm_setup(dev);
24452 }
24453 if (!retcode) {
24454 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24455
24456 mutex_lock(&drm_global_mutex);
24457
24458 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24459 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24460
24461 if (dev->driver->preclose)
24462 dev->driver->preclose(dev, file_priv);
24463 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24464 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24465 task_pid_nr(current),
24466 (long)old_encode_dev(file_priv->minor->device),
24467 - dev->open_count);
24468 + local_read(&dev->open_count));
24469
24470 /* if the master has gone away we can't do anything with the lock */
24471 if (file_priv->minor->master)
24472 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24473 * End inline drm_release
24474 */
24475
24476 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24477 - if (!--dev->open_count) {
24478 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24479 + if (local_dec_and_test(&dev->open_count)) {
24480 if (atomic_read(&dev->ioctl_count)) {
24481 DRM_ERROR("Device busy: %d\n",
24482 atomic_read(&dev->ioctl_count));
24483 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_global.c linux-2.6.39.4/drivers/gpu/drm/drm_global.c
24484 --- linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400
24485 +++ linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-08-05 19:44:36.000000000 -0400
24486 @@ -36,7 +36,7 @@
24487 struct drm_global_item {
24488 struct mutex mutex;
24489 void *object;
24490 - int refcount;
24491 + atomic_t refcount;
24492 };
24493
24494 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24495 @@ -49,7 +49,7 @@ void drm_global_init(void)
24496 struct drm_global_item *item = &glob[i];
24497 mutex_init(&item->mutex);
24498 item->object = NULL;
24499 - item->refcount = 0;
24500 + atomic_set(&item->refcount, 0);
24501 }
24502 }
24503
24504 @@ -59,7 +59,7 @@ void drm_global_release(void)
24505 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24506 struct drm_global_item *item = &glob[i];
24507 BUG_ON(item->object != NULL);
24508 - BUG_ON(item->refcount != 0);
24509 + BUG_ON(atomic_read(&item->refcount) != 0);
24510 }
24511 }
24512
24513 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24514 void *object;
24515
24516 mutex_lock(&item->mutex);
24517 - if (item->refcount == 0) {
24518 + if (atomic_read(&item->refcount) == 0) {
24519 item->object = kzalloc(ref->size, GFP_KERNEL);
24520 if (unlikely(item->object == NULL)) {
24521 ret = -ENOMEM;
24522 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24523 goto out_err;
24524
24525 }
24526 - ++item->refcount;
24527 + atomic_inc(&item->refcount);
24528 ref->object = item->object;
24529 object = item->object;
24530 mutex_unlock(&item->mutex);
24531 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24532 struct drm_global_item *item = &glob[ref->global_type];
24533
24534 mutex_lock(&item->mutex);
24535 - BUG_ON(item->refcount == 0);
24536 + BUG_ON(atomic_read(&item->refcount) == 0);
24537 BUG_ON(ref->object != item->object);
24538 - if (--item->refcount == 0) {
24539 + if (atomic_dec_and_test(&item->refcount)) {
24540 ref->release(ref);
24541 item->object = NULL;
24542 }
24543 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_info.c linux-2.6.39.4/drivers/gpu/drm/drm_info.c
24544 --- linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400
24545 +++ linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-08-05 19:44:36.000000000 -0400
24546 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24547 struct drm_local_map *map;
24548 struct drm_map_list *r_list;
24549
24550 - /* Hardcoded from _DRM_FRAME_BUFFER,
24551 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24552 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24553 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24554 + static const char * const types[] = {
24555 + [_DRM_FRAME_BUFFER] = "FB",
24556 + [_DRM_REGISTERS] = "REG",
24557 + [_DRM_SHM] = "SHM",
24558 + [_DRM_AGP] = "AGP",
24559 + [_DRM_SCATTER_GATHER] = "SG",
24560 + [_DRM_CONSISTENT] = "PCI",
24561 + [_DRM_GEM] = "GEM" };
24562 const char *type;
24563 int i;
24564
24565 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24566 map = r_list->map;
24567 if (!map)
24568 continue;
24569 - if (map->type < 0 || map->type > 5)
24570 + if (map->type >= ARRAY_SIZE(types))
24571 type = "??";
24572 else
24573 type = types[map->type];
24574 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24575 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24576 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24577 vma->vm_flags & VM_IO ? 'i' : '-',
24578 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24579 + 0);
24580 +#else
24581 vma->vm_pgoff);
24582 +#endif
24583
24584 #if defined(__i386__)
24585 pgprot = pgprot_val(vma->vm_page_prot);
24586 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c
24587 --- linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400
24588 +++ linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-08-05 19:44:36.000000000 -0400
24589 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24590 stats->data[i].value =
24591 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24592 else
24593 - stats->data[i].value = atomic_read(&dev->counts[i]);
24594 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24595 stats->data[i].type = dev->types[i];
24596 }
24597
24598 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_lock.c linux-2.6.39.4/drivers/gpu/drm/drm_lock.c
24599 --- linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400
24600 +++ linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-08-05 19:44:36.000000000 -0400
24601 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24602 if (drm_lock_take(&master->lock, lock->context)) {
24603 master->lock.file_priv = file_priv;
24604 master->lock.lock_time = jiffies;
24605 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24606 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24607 break; /* Got lock */
24608 }
24609
24610 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24611 return -EINVAL;
24612 }
24613
24614 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24615 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24616
24617 if (drm_lock_free(&master->lock, lock->context)) {
24618 /* FIXME: Should really bail out here. */
24619 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c
24620 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400
24621 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-05 19:44:36.000000000 -0400
24622 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24623 dma->buflist[vertex->idx],
24624 vertex->discard, vertex->used);
24625
24626 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24627 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24628 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24629 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24630 sarea_priv->last_enqueue = dev_priv->counter - 1;
24631 sarea_priv->last_dispatch = (int)hw_status[5];
24632
24633 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24634 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24635 mc->last_render);
24636
24637 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24638 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24639 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24640 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24641 sarea_priv->last_enqueue = dev_priv->counter - 1;
24642 sarea_priv->last_dispatch = (int)hw_status[5];
24643
24644 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h
24645 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400
24646 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-05 19:44:36.000000000 -0400
24647 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24648 int page_flipping;
24649
24650 wait_queue_head_t irq_queue;
24651 - atomic_t irq_received;
24652 - atomic_t irq_emitted;
24653 + atomic_unchecked_t irq_received;
24654 + atomic_unchecked_t irq_emitted;
24655
24656 int front_offset;
24657 } drm_i810_private_t;
24658 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c
24659 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400
24660 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-05 19:44:36.000000000 -0400
24661 @@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se
24662 I915_READ(GTIMR));
24663 }
24664 seq_printf(m, "Interrupts received: %d\n",
24665 - atomic_read(&dev_priv->irq_received));
24666 + atomic_read_unchecked(&dev_priv->irq_received));
24667 for (i = 0; i < I915_NUM_RINGS; i++) {
24668 if (IS_GEN6(dev)) {
24669 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24670 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c
24671 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400
24672 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-05 19:44:36.000000000 -0400
24673 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
24674 bool can_switch;
24675
24676 spin_lock(&dev->count_lock);
24677 - can_switch = (dev->open_count == 0);
24678 + can_switch = (local_read(&dev->open_count) == 0);
24679 spin_unlock(&dev->count_lock);
24680 return can_switch;
24681 }
24682 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h
24683 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400
24684 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:34:06.000000000 -0400
24685 @@ -209,7 +209,7 @@ struct drm_i915_display_funcs {
24686 /* display clock increase/decrease */
24687 /* pll clock increase/decrease */
24688 /* clock gating init */
24689 -};
24690 +} __no_const;
24691
24692 struct intel_device_info {
24693 u8 gen;
24694 @@ -287,7 +287,7 @@ typedef struct drm_i915_private {
24695 int current_page;
24696 int page_flipping;
24697
24698 - atomic_t irq_received;
24699 + atomic_unchecked_t irq_received;
24700
24701 /* protects the irq masks */
24702 spinlock_t irq_lock;
24703 @@ -848,7 +848,7 @@ struct drm_i915_gem_object {
24704 * will be page flipped away on the next vblank. When it
24705 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24706 */
24707 - atomic_t pending_flip;
24708 + atomic_unchecked_t pending_flip;
24709 };
24710
24711 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24712 @@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_
24713 extern void intel_teardown_gmbus(struct drm_device *dev);
24714 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24715 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24716 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24717 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24718 {
24719 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24720 }
24721 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24722 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400
24723 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-05 19:44:36.000000000 -0400
24724 @@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct
24725 i915_gem_release_mmap(obj);
24726
24727 if (obj->base.pending_write_domain)
24728 - cd->flips |= atomic_read(&obj->pending_flip);
24729 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24730
24731 /* The actual obj->write_domain will be updated with
24732 * pending_write_domain after we emit the accumulated flush for all
24733 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c
24734 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:18:51.000000000 -0400
24735 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-05 19:44:36.000000000 -0400
24736 @@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
24737 int ret = IRQ_NONE, pipe;
24738 bool blc_event = false;
24739
24740 - atomic_inc(&dev_priv->irq_received);
24741 + atomic_inc_unchecked(&dev_priv->irq_received);
24742
24743 if (HAS_PCH_SPLIT(dev))
24744 return ironlake_irq_handler(dev);
24745 @@ -1666,7 +1666,7 @@ void i915_driver_irq_preinstall(struct d
24746 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24747 int pipe;
24748
24749 - atomic_set(&dev_priv->irq_received, 0);
24750 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24751
24752 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24753 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24754 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c
24755 --- linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400
24756 +++ linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-08-05 19:44:36.000000000 -0400
24757 @@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24758
24759 wait_event(dev_priv->pending_flip_queue,
24760 atomic_read(&dev_priv->mm.wedged) ||
24761 - atomic_read(&obj->pending_flip) == 0);
24762 + atomic_read_unchecked(&obj->pending_flip) == 0);
24763
24764 /* Big Hammer, we also need to ensure that any pending
24765 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24766 @@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_
24767 obj = to_intel_framebuffer(crtc->fb)->obj;
24768 dev_priv = crtc->dev->dev_private;
24769 wait_event(dev_priv->pending_flip_queue,
24770 - atomic_read(&obj->pending_flip) == 0);
24771 + atomic_read_unchecked(&obj->pending_flip) == 0);
24772 }
24773
24774 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24775 @@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st
24776
24777 atomic_clear_mask(1 << intel_crtc->plane,
24778 &obj->pending_flip.counter);
24779 - if (atomic_read(&obj->pending_flip) == 0)
24780 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24781 wake_up(&dev_priv->pending_flip_queue);
24782
24783 schedule_work(&work->work);
24784 @@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d
24785 /* Block clients from rendering to the new back buffer until
24786 * the flip occurs and the object is no longer visible.
24787 */
24788 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24789 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24790
24791 switch (INTEL_INFO(dev)->gen) {
24792 case 2:
24793 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h
24794 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400
24795 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-05 19:44:36.000000000 -0400
24796 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24797 u32 clear_cmd;
24798 u32 maccess;
24799
24800 - atomic_t vbl_received; /**< Number of vblanks received. */
24801 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24802 wait_queue_head_t fence_queue;
24803 - atomic_t last_fence_retired;
24804 + atomic_unchecked_t last_fence_retired;
24805 u32 next_fence_to_post;
24806
24807 unsigned int fb_cpp;
24808 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c
24809 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400
24810 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-05 19:44:36.000000000 -0400
24811 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24812 if (crtc != 0)
24813 return 0;
24814
24815 - return atomic_read(&dev_priv->vbl_received);
24816 + return atomic_read_unchecked(&dev_priv->vbl_received);
24817 }
24818
24819
24820 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24821 /* VBLANK interrupt */
24822 if (status & MGA_VLINEPEN) {
24823 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24824 - atomic_inc(&dev_priv->vbl_received);
24825 + atomic_inc_unchecked(&dev_priv->vbl_received);
24826 drm_handle_vblank(dev, 0);
24827 handled = 1;
24828 }
24829 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24830 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24831 MGA_WRITE(MGA_PRIMEND, prim_end);
24832
24833 - atomic_inc(&dev_priv->last_fence_retired);
24834 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24835 DRM_WAKEUP(&dev_priv->fence_queue);
24836 handled = 1;
24837 }
24838 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24839 * using fences.
24840 */
24841 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24842 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24843 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24844 - *sequence) <= (1 << 23)));
24845
24846 *sequence = cur_fence;
24847 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24848 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400
24849 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-05 20:34:06.000000000 -0400
24850 @@ -228,7 +228,7 @@ struct nouveau_channel {
24851 struct list_head pending;
24852 uint32_t sequence;
24853 uint32_t sequence_ack;
24854 - atomic_t last_sequence_irq;
24855 + atomic_unchecked_t last_sequence_irq;
24856 } fence;
24857
24858 /* DMA push buffer */
24859 @@ -317,13 +317,13 @@ struct nouveau_instmem_engine {
24860 struct nouveau_mc_engine {
24861 int (*init)(struct drm_device *dev);
24862 void (*takedown)(struct drm_device *dev);
24863 -};
24864 +} __no_const;
24865
24866 struct nouveau_timer_engine {
24867 int (*init)(struct drm_device *dev);
24868 void (*takedown)(struct drm_device *dev);
24869 uint64_t (*read)(struct drm_device *dev);
24870 -};
24871 +} __no_const;
24872
24873 struct nouveau_fb_engine {
24874 int num_tiles;
24875 @@ -516,7 +516,7 @@ struct nouveau_vram_engine {
24876 void (*put)(struct drm_device *, struct nouveau_mem **);
24877
24878 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24879 -};
24880 +} __no_const;
24881
24882 struct nouveau_engine {
24883 struct nouveau_instmem_engine instmem;
24884 @@ -662,7 +662,7 @@ struct drm_nouveau_private {
24885 struct drm_global_reference mem_global_ref;
24886 struct ttm_bo_global_ref bo_global_ref;
24887 struct ttm_bo_device bdev;
24888 - atomic_t validate_sequence;
24889 + atomic_unchecked_t validate_sequence;
24890 } ttm;
24891
24892 struct {
24893 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24894 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400
24895 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-05 19:44:36.000000000 -0400
24896 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24897 if (USE_REFCNT(dev))
24898 sequence = nvchan_rd32(chan, 0x48);
24899 else
24900 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24901 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24902
24903 if (chan->fence.sequence_ack == sequence)
24904 goto out;
24905 @@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea
24906 out_initialised:
24907 INIT_LIST_HEAD(&chan->fence.pending);
24908 spin_lock_init(&chan->fence.lock);
24909 - atomic_set(&chan->fence.last_sequence_irq, 0);
24910 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24911 return 0;
24912 }
24913
24914 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24915 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400
24916 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-05 19:44:36.000000000 -0400
24917 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24918 int trycnt = 0;
24919 int ret, i;
24920
24921 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24922 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24923 retry:
24924 if (++trycnt > 100000) {
24925 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24926 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c
24927 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400
24928 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-05 19:44:36.000000000 -0400
24929 @@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc
24930 bool can_switch;
24931
24932 spin_lock(&dev->count_lock);
24933 - can_switch = (dev->open_count == 0);
24934 + can_switch = (local_read(&dev->open_count) == 0);
24935 spin_unlock(&dev->count_lock);
24936 return can_switch;
24937 }
24938 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c
24939 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400
24940 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-05 19:44:36.000000000 -0400
24941 @@ -552,7 +552,7 @@ static int
24942 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24943 u32 class, u32 mthd, u32 data)
24944 {
24945 - atomic_set(&chan->fence.last_sequence_irq, data);
24946 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24947 return 0;
24948 }
24949
24950 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c
24951 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400
24952 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-05 19:44:36.000000000 -0400
24953 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24954
24955 /* GH: Simple idle check.
24956 */
24957 - atomic_set(&dev_priv->idle_count, 0);
24958 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24959
24960 /* We don't support anything other than bus-mastering ring mode,
24961 * but the ring can be in either AGP or PCI space for the ring
24962 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h
24963 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400
24964 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-05 19:44:36.000000000 -0400
24965 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
24966 int is_pci;
24967 unsigned long cce_buffers_offset;
24968
24969 - atomic_t idle_count;
24970 + atomic_unchecked_t idle_count;
24971
24972 int page_flipping;
24973 int current_page;
24974 u32 crtc_offset;
24975 u32 crtc_offset_cntl;
24976
24977 - atomic_t vbl_received;
24978 + atomic_unchecked_t vbl_received;
24979
24980 u32 color_fmt;
24981 unsigned int front_offset;
24982 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c
24983 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400
24984 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-05 19:44:36.000000000 -0400
24985 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
24986 if (crtc != 0)
24987 return 0;
24988
24989 - return atomic_read(&dev_priv->vbl_received);
24990 + return atomic_read_unchecked(&dev_priv->vbl_received);
24991 }
24992
24993 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
24994 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
24995 /* VBLANK interrupt */
24996 if (status & R128_CRTC_VBLANK_INT) {
24997 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
24998 - atomic_inc(&dev_priv->vbl_received);
24999 + atomic_inc_unchecked(&dev_priv->vbl_received);
25000 drm_handle_vblank(dev, 0);
25001 return IRQ_HANDLED;
25002 }
25003 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c
25004 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400
25005 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-08-05 19:44:36.000000000 -0400
25006 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25007
25008 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25009 {
25010 - if (atomic_read(&dev_priv->idle_count) == 0)
25011 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25012 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25013 else
25014 - atomic_set(&dev_priv->idle_count, 0);
25015 + atomic_set_unchecked(&dev_priv->idle_count, 0);
25016 }
25017
25018 #endif
25019 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c
25020 --- linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-05-19 00:06:34.000000000 -0400
25021 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-08-05 19:44:36.000000000 -0400
25022 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25023 char name[512];
25024 int i;
25025
25026 + pax_track_stack();
25027 +
25028 ctx->card = card;
25029 ctx->bios = bios;
25030
25031 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c
25032 --- linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400
25033 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-05 19:44:36.000000000 -0400
25034 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25035 regex_t mask_rex;
25036 regmatch_t match[4];
25037 char buf[1024];
25038 - size_t end;
25039 + long end;
25040 int len;
25041 int done = 0;
25042 int r;
25043 unsigned o;
25044 struct offset *offset;
25045 char last_reg_s[10];
25046 - int last_reg;
25047 + unsigned long last_reg;
25048
25049 if (regcomp
25050 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25051 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c
25052 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400
25053 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-05 19:44:36.000000000 -0400
25054 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25055 struct radeon_gpio_rec gpio;
25056 struct radeon_hpd hpd;
25057
25058 + pax_track_stack();
25059 +
25060 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25061 return false;
25062
25063 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c
25064 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 12:55:22.000000000 -0400
25065 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-05 19:44:36.000000000 -0400
25066 @@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch
25067 bool can_switch;
25068
25069 spin_lock(&dev->count_lock);
25070 - can_switch = (dev->open_count == 0);
25071 + can_switch = (local_read(&dev->open_count) == 0);
25072 spin_unlock(&dev->count_lock);
25073 return can_switch;
25074 }
25075 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c
25076 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:11:51.000000000 -0400
25077 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:12:20.000000000 -0400
25078 @@ -937,6 +937,8 @@ void radeon_compute_pll_legacy(struct ra
25079 uint32_t post_div;
25080 u32 pll_out_min, pll_out_max;
25081
25082 + pax_track_stack();
25083 +
25084 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25085 freq = freq * 1000;
25086
25087 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h
25088 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400
25089 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-05 19:44:36.000000000 -0400
25090 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25091
25092 /* SW interrupt */
25093 wait_queue_head_t swi_queue;
25094 - atomic_t swi_emitted;
25095 + atomic_unchecked_t swi_emitted;
25096 int vblank_crtc;
25097 uint32_t irq_enable_reg;
25098 uint32_t r500_disp_irq_reg;
25099 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c
25100 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400
25101 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-05 19:44:36.000000000 -0400
25102 @@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi
25103 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25104 return 0;
25105 }
25106 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25107 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25108 if (!rdev->cp.ready) {
25109 /* FIXME: cp is not running assume everythings is done right
25110 * away
25111 @@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade
25112 return r;
25113 }
25114 WREG32(rdev->fence_drv.scratch_reg, 0);
25115 - atomic_set(&rdev->fence_drv.seq, 0);
25116 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25117 INIT_LIST_HEAD(&rdev->fence_drv.created);
25118 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25119 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25120 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h
25121 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400
25122 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:34:06.000000000 -0400
25123 @@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d
25124 */
25125 struct radeon_fence_driver {
25126 uint32_t scratch_reg;
25127 - atomic_t seq;
25128 + atomic_unchecked_t seq;
25129 uint32_t last_seq;
25130 unsigned long last_jiffies;
25131 unsigned long last_timeout;
25132 @@ -958,7 +958,7 @@ struct radeon_asic {
25133 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25134 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25135 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25136 -};
25137 +} __no_const;
25138
25139 /*
25140 * Asic structures
25141 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25142 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400
25143 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-05 19:44:36.000000000 -0400
25144 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25145 request = compat_alloc_user_space(sizeof(*request));
25146 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25147 || __put_user(req32.param, &request->param)
25148 - || __put_user((void __user *)(unsigned long)req32.value,
25149 + || __put_user((unsigned long)req32.value,
25150 &request->value))
25151 return -EFAULT;
25152
25153 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c
25154 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400
25155 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-05 19:44:36.000000000 -0400
25156 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25157 unsigned int ret;
25158 RING_LOCALS;
25159
25160 - atomic_inc(&dev_priv->swi_emitted);
25161 - ret = atomic_read(&dev_priv->swi_emitted);
25162 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25163 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25164
25165 BEGIN_RING(4);
25166 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25167 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25168 drm_radeon_private_t *dev_priv =
25169 (drm_radeon_private_t *) dev->dev_private;
25170
25171 - atomic_set(&dev_priv->swi_emitted, 0);
25172 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25173 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25174
25175 dev->max_vblank_count = 0x001fffff;
25176 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c
25177 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400
25178 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-05 19:44:36.000000000 -0400
25179 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25180 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25181 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25182
25183 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25184 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25185 sarea_priv->nbox * sizeof(depth_boxes[0])))
25186 return -EFAULT;
25187
25188 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25189 {
25190 drm_radeon_private_t *dev_priv = dev->dev_private;
25191 drm_radeon_getparam_t *param = data;
25192 - int value;
25193 + int value = 0;
25194
25195 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25196
25197 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c
25198 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400
25199 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-05 20:34:06.000000000 -0400
25200 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25201 }
25202 if (unlikely(ttm_vm_ops == NULL)) {
25203 ttm_vm_ops = vma->vm_ops;
25204 - radeon_ttm_vm_ops = *ttm_vm_ops;
25205 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25206 + pax_open_kernel();
25207 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25208 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25209 + pax_close_kernel();
25210 }
25211 vma->vm_ops = &radeon_ttm_vm_ops;
25212 return 0;
25213 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c
25214 --- linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400
25215 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-08-05 19:44:36.000000000 -0400
25216 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25217 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25218 rdev->pm.sideport_bandwidth.full)
25219 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25220 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25221 + read_delay_latency.full = dfixed_const(800 * 1000);
25222 read_delay_latency.full = dfixed_div(read_delay_latency,
25223 rdev->pm.igp_sideport_mclk);
25224 + a.full = dfixed_const(370);
25225 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25226 } else {
25227 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25228 rdev->pm.k8_bandwidth.full)
25229 diff -urNp linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25230 --- linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400
25231 +++ linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-05 19:44:36.000000000 -0400
25232 @@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages
25233 */
25234 static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
25235 {
25236 - static atomic_t start_pool = ATOMIC_INIT(0);
25237 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25238 unsigned i;
25239 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25240 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25241 struct ttm_page_pool *pool;
25242
25243 pool_offset = pool_offset % NUM_POOLS;
25244 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h
25245 --- linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400
25246 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-08-05 19:44:36.000000000 -0400
25247 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25248 typedef uint32_t maskarray_t[5];
25249
25250 typedef struct drm_via_irq {
25251 - atomic_t irq_received;
25252 + atomic_unchecked_t irq_received;
25253 uint32_t pending_mask;
25254 uint32_t enable_mask;
25255 wait_queue_head_t irq_queue;
25256 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25257 struct timeval last_vblank;
25258 int last_vblank_valid;
25259 unsigned usec_per_vblank;
25260 - atomic_t vbl_received;
25261 + atomic_unchecked_t vbl_received;
25262 drm_via_state_t hc_state;
25263 char pci_buf[VIA_PCI_BUF_SIZE];
25264 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25265 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c
25266 --- linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-05-19 00:06:34.000000000 -0400
25267 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-08-05 19:44:36.000000000 -0400
25268 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25269 if (crtc != 0)
25270 return 0;
25271
25272 - return atomic_read(&dev_priv->vbl_received);
25273 + return atomic_read_unchecked(&dev_priv->vbl_received);
25274 }
25275
25276 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25277 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25278
25279 status = VIA_READ(VIA_REG_INTERRUPT);
25280 if (status & VIA_IRQ_VBLANK_PENDING) {
25281 - atomic_inc(&dev_priv->vbl_received);
25282 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25283 + atomic_inc_unchecked(&dev_priv->vbl_received);
25284 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25285 do_gettimeofday(&cur_vblank);
25286 if (dev_priv->last_vblank_valid) {
25287 dev_priv->usec_per_vblank =
25288 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25289 dev_priv->last_vblank = cur_vblank;
25290 dev_priv->last_vblank_valid = 1;
25291 }
25292 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25293 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25294 DRM_DEBUG("US per vblank is: %u\n",
25295 dev_priv->usec_per_vblank);
25296 }
25297 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25298
25299 for (i = 0; i < dev_priv->num_irqs; ++i) {
25300 if (status & cur_irq->pending_mask) {
25301 - atomic_inc(&cur_irq->irq_received);
25302 + atomic_inc_unchecked(&cur_irq->irq_received);
25303 DRM_WAKEUP(&cur_irq->irq_queue);
25304 handled = 1;
25305 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25306 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25307 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25308 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25309 masks[irq][4]));
25310 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25311 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25312 } else {
25313 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25314 (((cur_irq_sequence =
25315 - atomic_read(&cur_irq->irq_received)) -
25316 + atomic_read_unchecked(&cur_irq->irq_received)) -
25317 *sequence) <= (1 << 23)));
25318 }
25319 *sequence = cur_irq_sequence;
25320 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25321 }
25322
25323 for (i = 0; i < dev_priv->num_irqs; ++i) {
25324 - atomic_set(&cur_irq->irq_received, 0);
25325 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25326 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25327 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25328 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25329 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25330 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25331 case VIA_IRQ_RELATIVE:
25332 irqwait->request.sequence +=
25333 - atomic_read(&cur_irq->irq_received);
25334 + atomic_read_unchecked(&cur_irq->irq_received);
25335 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25336 case VIA_IRQ_ABSOLUTE:
25337 break;
25338 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25339 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400
25340 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-05 19:44:36.000000000 -0400
25341 @@ -240,7 +240,7 @@ struct vmw_private {
25342 * Fencing and IRQs.
25343 */
25344
25345 - atomic_t fence_seq;
25346 + atomic_unchecked_t fence_seq;
25347 wait_queue_head_t fence_queue;
25348 wait_queue_head_t fifo_queue;
25349 atomic_t fence_queue_waiters;
25350 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25351 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400
25352 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-05 19:44:36.000000000 -0400
25353 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25354 while (!vmw_lag_lt(queue, us)) {
25355 spin_lock(&queue->lock);
25356 if (list_empty(&queue->head))
25357 - sequence = atomic_read(&dev_priv->fence_seq);
25358 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25359 else {
25360 fence = list_first_entry(&queue->head,
25361 struct vmw_fence, head);
25362 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25363 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400
25364 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-05 20:34:06.000000000 -0400
25365 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25366 (unsigned int) min,
25367 (unsigned int) fifo->capabilities);
25368
25369 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25370 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25371 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25372 vmw_fence_queue_init(&fifo->fence_queue);
25373 return vmw_fifo_send_fence(dev_priv, &dummy);
25374 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25375
25376 fm = vmw_fifo_reserve(dev_priv, bytes);
25377 if (unlikely(fm == NULL)) {
25378 - *sequence = atomic_read(&dev_priv->fence_seq);
25379 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25380 ret = -ENOMEM;
25381 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25382 false, 3*HZ);
25383 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25384 }
25385
25386 do {
25387 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25388 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25389 } while (*sequence == 0);
25390
25391 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25392 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25393 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400
25394 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-05 19:44:36.000000000 -0400
25395 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25396 * emitted. Then the fence is stale and signaled.
25397 */
25398
25399 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25400 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25401 > VMW_FENCE_WRAP);
25402
25403 return ret;
25404 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25405
25406 if (fifo_idle)
25407 down_read(&fifo_state->rwsem);
25408 - signal_seq = atomic_read(&dev_priv->fence_seq);
25409 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25410 ret = 0;
25411
25412 for (;;) {
25413 diff -urNp linux-2.6.39.4/drivers/hid/hid-core.c linux-2.6.39.4/drivers/hid/hid-core.c
25414 --- linux-2.6.39.4/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400
25415 +++ linux-2.6.39.4/drivers/hid/hid-core.c 2011-08-05 19:44:36.000000000 -0400
25416 @@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device
25417
25418 int hid_add_device(struct hid_device *hdev)
25419 {
25420 - static atomic_t id = ATOMIC_INIT(0);
25421 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25422 int ret;
25423
25424 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25425 @@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd
25426 /* XXX hack, any other cleaner solution after the driver core
25427 * is converted to allow more than 20 bytes as the device name? */
25428 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25429 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25430 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25431
25432 hid_debug_register(hdev, dev_name(&hdev->dev));
25433 ret = device_add(&hdev->dev);
25434 diff -urNp linux-2.6.39.4/drivers/hid/usbhid/hiddev.c linux-2.6.39.4/drivers/hid/usbhid/hiddev.c
25435 --- linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400
25436 +++ linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-08-05 19:44:36.000000000 -0400
25437 @@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi
25438 break;
25439
25440 case HIDIOCAPPLICATION:
25441 - if (arg < 0 || arg >= hid->maxapplication)
25442 + if (arg >= hid->maxapplication)
25443 break;
25444
25445 for (i = 0; i < hid->maxcollection; i++)
25446 diff -urNp linux-2.6.39.4/drivers/hwmon/sht15.c linux-2.6.39.4/drivers/hwmon/sht15.c
25447 --- linux-2.6.39.4/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400
25448 +++ linux-2.6.39.4/drivers/hwmon/sht15.c 2011-08-05 19:44:36.000000000 -0400
25449 @@ -113,7 +113,7 @@ struct sht15_data {
25450 int supply_uV;
25451 int supply_uV_valid;
25452 struct work_struct update_supply_work;
25453 - atomic_t interrupt_handled;
25454 + atomic_unchecked_t interrupt_handled;
25455 };
25456
25457 /**
25458 @@ -246,13 +246,13 @@ static inline int sht15_update_single_va
25459 return ret;
25460
25461 gpio_direction_input(data->pdata->gpio_data);
25462 - atomic_set(&data->interrupt_handled, 0);
25463 + atomic_set_unchecked(&data->interrupt_handled, 0);
25464
25465 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25466 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25467 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25468 /* Only relevant if the interrupt hasn't occurred. */
25469 - if (!atomic_read(&data->interrupt_handled))
25470 + if (!atomic_read_unchecked(&data->interrupt_handled))
25471 schedule_work(&data->read_work);
25472 }
25473 ret = wait_event_timeout(data->wait_queue,
25474 @@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired
25475 struct sht15_data *data = d;
25476 /* First disable the interrupt */
25477 disable_irq_nosync(irq);
25478 - atomic_inc(&data->interrupt_handled);
25479 + atomic_inc_unchecked(&data->interrupt_handled);
25480 /* Then schedule a reading work struct */
25481 if (data->flag != SHT15_READING_NOTHING)
25482 schedule_work(&data->read_work);
25483 @@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo
25484 here as could have gone low in meantime so verify
25485 it hasn't!
25486 */
25487 - atomic_set(&data->interrupt_handled, 0);
25488 + atomic_set_unchecked(&data->interrupt_handled, 0);
25489 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25490 /* If still not occurred or another handler has been scheduled */
25491 if (gpio_get_value(data->pdata->gpio_data)
25492 - || atomic_read(&data->interrupt_handled))
25493 + || atomic_read_unchecked(&data->interrupt_handled))
25494 return;
25495 }
25496 /* Read the data back from the device */
25497 diff -urNp linux-2.6.39.4/drivers/hwmon/w83791d.c linux-2.6.39.4/drivers/hwmon/w83791d.c
25498 --- linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400
25499 +++ linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-08-05 19:44:36.000000000 -0400
25500 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25501 struct i2c_board_info *info);
25502 static int w83791d_remove(struct i2c_client *client);
25503
25504 -static int w83791d_read(struct i2c_client *client, u8 register);
25505 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25506 +static int w83791d_read(struct i2c_client *client, u8 reg);
25507 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25508 static struct w83791d_data *w83791d_update_device(struct device *dev);
25509
25510 #ifdef DEBUG
25511 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c
25512 --- linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-05-19 00:06:34.000000000 -0400
25513 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:34:06.000000000 -0400
25514 @@ -43,7 +43,7 @@
25515 extern struct i2c_adapter amd756_smbus;
25516
25517 static struct i2c_adapter *s4882_adapter;
25518 -static struct i2c_algorithm *s4882_algo;
25519 +static i2c_algorithm_no_const *s4882_algo;
25520
25521 /* Wrapper access functions for multiplexed SMBus */
25522 static DEFINE_MUTEX(amd756_lock);
25523 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25524 --- linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-05-19 00:06:34.000000000 -0400
25525 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:34:06.000000000 -0400
25526 @@ -41,7 +41,7 @@
25527 extern struct i2c_adapter *nforce2_smbus;
25528
25529 static struct i2c_adapter *s4985_adapter;
25530 -static struct i2c_algorithm *s4985_algo;
25531 +static i2c_algorithm_no_const *s4985_algo;
25532
25533 /* Wrapper access functions for multiplexed SMBus */
25534 static DEFINE_MUTEX(nforce2_lock);
25535 diff -urNp linux-2.6.39.4/drivers/i2c/i2c-mux.c linux-2.6.39.4/drivers/i2c/i2c-mux.c
25536 --- linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-05-19 00:06:34.000000000 -0400
25537 +++ linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-08-05 20:34:06.000000000 -0400
25538 @@ -28,7 +28,7 @@
25539 /* multiplexer per channel data */
25540 struct i2c_mux_priv {
25541 struct i2c_adapter adap;
25542 - struct i2c_algorithm algo;
25543 + i2c_algorithm_no_const algo;
25544
25545 struct i2c_adapter *parent;
25546 void *mux_dev; /* the mux chip/device */
25547 diff -urNp linux-2.6.39.4/drivers/ide/ide-cd.c linux-2.6.39.4/drivers/ide/ide-cd.c
25548 --- linux-2.6.39.4/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400
25549 +++ linux-2.6.39.4/drivers/ide/ide-cd.c 2011-08-05 19:44:36.000000000 -0400
25550 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25551 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25552 if ((unsigned long)buf & alignment
25553 || blk_rq_bytes(rq) & q->dma_pad_mask
25554 - || object_is_on_stack(buf))
25555 + || object_starts_on_stack(buf))
25556 drive->dma = 0;
25557 }
25558 }
25559 diff -urNp linux-2.6.39.4/drivers/ide/ide-floppy.c linux-2.6.39.4/drivers/ide/ide-floppy.c
25560 --- linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400
25561 +++ linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-08-05 19:44:36.000000000 -0400
25562 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25563 u8 pc_buf[256], header_len, desc_cnt;
25564 int i, rc = 1, blocks, length;
25565
25566 + pax_track_stack();
25567 +
25568 ide_debug_log(IDE_DBG_FUNC, "enter");
25569
25570 drive->bios_cyl = 0;
25571 diff -urNp linux-2.6.39.4/drivers/ide/setup-pci.c linux-2.6.39.4/drivers/ide/setup-pci.c
25572 --- linux-2.6.39.4/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 -0400
25573 +++ linux-2.6.39.4/drivers/ide/setup-pci.c 2011-08-05 19:44:36.000000000 -0400
25574 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25575 int ret, i, n_ports = dev2 ? 4 : 2;
25576 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25577
25578 + pax_track_stack();
25579 +
25580 for (i = 0; i < n_ports / 2; i++) {
25581 ret = ide_setup_pci_controller(pdev[i], d, !i);
25582 if (ret < 0)
25583 diff -urNp linux-2.6.39.4/drivers/infiniband/core/cm.c linux-2.6.39.4/drivers/infiniband/core/cm.c
25584 --- linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400
25585 +++ linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-08-05 19:44:36.000000000 -0400
25586 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25587
25588 struct cm_counter_group {
25589 struct kobject obj;
25590 - atomic_long_t counter[CM_ATTR_COUNT];
25591 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25592 };
25593
25594 struct cm_counter_attribute {
25595 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25596 struct ib_mad_send_buf *msg = NULL;
25597 int ret;
25598
25599 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25600 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25601 counter[CM_REQ_COUNTER]);
25602
25603 /* Quick state check to discard duplicate REQs. */
25604 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25605 if (!cm_id_priv)
25606 return;
25607
25608 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25609 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25610 counter[CM_REP_COUNTER]);
25611 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25612 if (ret)
25613 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25614 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25615 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25616 spin_unlock_irq(&cm_id_priv->lock);
25617 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25618 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25619 counter[CM_RTU_COUNTER]);
25620 goto out;
25621 }
25622 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25623 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25624 dreq_msg->local_comm_id);
25625 if (!cm_id_priv) {
25626 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25627 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25628 counter[CM_DREQ_COUNTER]);
25629 cm_issue_drep(work->port, work->mad_recv_wc);
25630 return -EINVAL;
25631 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25632 case IB_CM_MRA_REP_RCVD:
25633 break;
25634 case IB_CM_TIMEWAIT:
25635 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25636 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25637 counter[CM_DREQ_COUNTER]);
25638 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25639 goto unlock;
25640 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25641 cm_free_msg(msg);
25642 goto deref;
25643 case IB_CM_DREQ_RCVD:
25644 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25645 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25646 counter[CM_DREQ_COUNTER]);
25647 goto unlock;
25648 default:
25649 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25650 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25651 cm_id_priv->msg, timeout)) {
25652 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25653 - atomic_long_inc(&work->port->
25654 + atomic_long_inc_unchecked(&work->port->
25655 counter_group[CM_RECV_DUPLICATES].
25656 counter[CM_MRA_COUNTER]);
25657 goto out;
25658 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25659 break;
25660 case IB_CM_MRA_REQ_RCVD:
25661 case IB_CM_MRA_REP_RCVD:
25662 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25663 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25664 counter[CM_MRA_COUNTER]);
25665 /* fall through */
25666 default:
25667 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25668 case IB_CM_LAP_IDLE:
25669 break;
25670 case IB_CM_MRA_LAP_SENT:
25671 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25672 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25673 counter[CM_LAP_COUNTER]);
25674 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25675 goto unlock;
25676 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25677 cm_free_msg(msg);
25678 goto deref;
25679 case IB_CM_LAP_RCVD:
25680 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25681 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25682 counter[CM_LAP_COUNTER]);
25683 goto unlock;
25684 default:
25685 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25686 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25687 if (cur_cm_id_priv) {
25688 spin_unlock_irq(&cm.lock);
25689 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25690 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25691 counter[CM_SIDR_REQ_COUNTER]);
25692 goto out; /* Duplicate message. */
25693 }
25694 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25695 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25696 msg->retries = 1;
25697
25698 - atomic_long_add(1 + msg->retries,
25699 + atomic_long_add_unchecked(1 + msg->retries,
25700 &port->counter_group[CM_XMIT].counter[attr_index]);
25701 if (msg->retries)
25702 - atomic_long_add(msg->retries,
25703 + atomic_long_add_unchecked(msg->retries,
25704 &port->counter_group[CM_XMIT_RETRIES].
25705 counter[attr_index]);
25706
25707 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25708 }
25709
25710 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25711 - atomic_long_inc(&port->counter_group[CM_RECV].
25712 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25713 counter[attr_id - CM_ATTR_ID_OFFSET]);
25714
25715 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25716 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25717 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25718
25719 return sprintf(buf, "%ld\n",
25720 - atomic_long_read(&group->counter[cm_attr->index]));
25721 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25722 }
25723
25724 static const struct sysfs_ops cm_counter_ops = {
25725 diff -urNp linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c
25726 --- linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400
25727 +++ linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-08-05 19:44:36.000000000 -0400
25728 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25729
25730 struct task_struct *thread;
25731
25732 - atomic_t req_ser;
25733 - atomic_t flush_ser;
25734 + atomic_unchecked_t req_ser;
25735 + atomic_unchecked_t flush_ser;
25736
25737 wait_queue_head_t force_wait;
25738 };
25739 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25740 struct ib_fmr_pool *pool = pool_ptr;
25741
25742 do {
25743 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25744 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25745 ib_fmr_batch_release(pool);
25746
25747 - atomic_inc(&pool->flush_ser);
25748 + atomic_inc_unchecked(&pool->flush_ser);
25749 wake_up_interruptible(&pool->force_wait);
25750
25751 if (pool->flush_function)
25752 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25753 }
25754
25755 set_current_state(TASK_INTERRUPTIBLE);
25756 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25757 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25758 !kthread_should_stop())
25759 schedule();
25760 __set_current_state(TASK_RUNNING);
25761 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25762 pool->dirty_watermark = params->dirty_watermark;
25763 pool->dirty_len = 0;
25764 spin_lock_init(&pool->pool_lock);
25765 - atomic_set(&pool->req_ser, 0);
25766 - atomic_set(&pool->flush_ser, 0);
25767 + atomic_set_unchecked(&pool->req_ser, 0);
25768 + atomic_set_unchecked(&pool->flush_ser, 0);
25769 init_waitqueue_head(&pool->force_wait);
25770
25771 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25772 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25773 }
25774 spin_unlock_irq(&pool->pool_lock);
25775
25776 - serial = atomic_inc_return(&pool->req_ser);
25777 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25778 wake_up_process(pool->thread);
25779
25780 if (wait_event_interruptible(pool->force_wait,
25781 - atomic_read(&pool->flush_ser) - serial >= 0))
25782 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25783 return -EINTR;
25784
25785 return 0;
25786 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25787 } else {
25788 list_add_tail(&fmr->list, &pool->dirty_list);
25789 if (++pool->dirty_len >= pool->dirty_watermark) {
25790 - atomic_inc(&pool->req_ser);
25791 + atomic_inc_unchecked(&pool->req_ser);
25792 wake_up_process(pool->thread);
25793 }
25794 }
25795 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c
25796 --- linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400
25797 +++ linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-05 19:44:36.000000000 -0400
25798 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25799 int err;
25800 struct fw_ri_tpte tpt;
25801 u32 stag_idx;
25802 - static atomic_t key;
25803 + static atomic_unchecked_t key;
25804
25805 if (c4iw_fatal_error(rdev))
25806 return -EIO;
25807 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25808 &rdev->resource.tpt_fifo_lock);
25809 if (!stag_idx)
25810 return -ENOMEM;
25811 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25812 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25813 }
25814 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25815 __func__, stag_state, type, pdid, stag_idx);
25816 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c
25817 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400
25818 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-05 19:44:36.000000000 -0400
25819 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25820 struct infinipath_counters counters;
25821 struct ipath_devdata *dd;
25822
25823 + pax_track_stack();
25824 +
25825 dd = file->f_path.dentry->d_inode->i_private;
25826 dd->ipath_f_read_counters(dd, &counters);
25827
25828 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c
25829 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400
25830 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-05 19:44:36.000000000 -0400
25831 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25832 struct ib_atomic_eth *ateth;
25833 struct ipath_ack_entry *e;
25834 u64 vaddr;
25835 - atomic64_t *maddr;
25836 + atomic64_unchecked_t *maddr;
25837 u64 sdata;
25838 u32 rkey;
25839 u8 next;
25840 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25841 IB_ACCESS_REMOTE_ATOMIC)))
25842 goto nack_acc_unlck;
25843 /* Perform atomic OP and save result. */
25844 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25845 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25846 sdata = be64_to_cpu(ateth->swap_data);
25847 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25848 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25849 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25850 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25851 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25852 be64_to_cpu(ateth->compare_data),
25853 sdata);
25854 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25855 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400
25856 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-05 19:44:36.000000000 -0400
25857 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25858 unsigned long flags;
25859 struct ib_wc wc;
25860 u64 sdata;
25861 - atomic64_t *maddr;
25862 + atomic64_unchecked_t *maddr;
25863 enum ib_wc_status send_status;
25864
25865 /*
25866 @@ -382,11 +382,11 @@ again:
25867 IB_ACCESS_REMOTE_ATOMIC)))
25868 goto acc_err;
25869 /* Perform atomic OP and save result. */
25870 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25871 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25872 sdata = wqe->wr.wr.atomic.compare_add;
25873 *(u64 *) sqp->s_sge.sge.vaddr =
25874 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25875 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25876 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25877 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25878 sdata, wqe->wr.wr.atomic.swap);
25879 goto send_comp;
25880 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c
25881 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400
25882 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-08-05 19:44:36.000000000 -0400
25883 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25884 LIST_HEAD(nes_adapter_list);
25885 static LIST_HEAD(nes_dev_list);
25886
25887 -atomic_t qps_destroyed;
25888 +atomic_unchecked_t qps_destroyed;
25889
25890 static unsigned int ee_flsh_adapter;
25891 static unsigned int sysfs_nonidx_addr;
25892 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25893 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25894 struct nes_adapter *nesadapter = nesdev->nesadapter;
25895
25896 - atomic_inc(&qps_destroyed);
25897 + atomic_inc_unchecked(&qps_destroyed);
25898
25899 /* Free the control structures */
25900
25901 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c
25902 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400
25903 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-05 19:44:36.000000000 -0400
25904 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25905 u32 cm_packets_retrans;
25906 u32 cm_packets_created;
25907 u32 cm_packets_received;
25908 -atomic_t cm_listens_created;
25909 -atomic_t cm_listens_destroyed;
25910 +atomic_unchecked_t cm_listens_created;
25911 +atomic_unchecked_t cm_listens_destroyed;
25912 u32 cm_backlog_drops;
25913 -atomic_t cm_loopbacks;
25914 -atomic_t cm_nodes_created;
25915 -atomic_t cm_nodes_destroyed;
25916 -atomic_t cm_accel_dropped_pkts;
25917 -atomic_t cm_resets_recvd;
25918 +atomic_unchecked_t cm_loopbacks;
25919 +atomic_unchecked_t cm_nodes_created;
25920 +atomic_unchecked_t cm_nodes_destroyed;
25921 +atomic_unchecked_t cm_accel_dropped_pkts;
25922 +atomic_unchecked_t cm_resets_recvd;
25923
25924 static inline int mini_cm_accelerated(struct nes_cm_core *,
25925 struct nes_cm_node *);
25926 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25927
25928 static struct nes_cm_core *g_cm_core;
25929
25930 -atomic_t cm_connects;
25931 -atomic_t cm_accepts;
25932 -atomic_t cm_disconnects;
25933 -atomic_t cm_closes;
25934 -atomic_t cm_connecteds;
25935 -atomic_t cm_connect_reqs;
25936 -atomic_t cm_rejects;
25937 +atomic_unchecked_t cm_connects;
25938 +atomic_unchecked_t cm_accepts;
25939 +atomic_unchecked_t cm_disconnects;
25940 +atomic_unchecked_t cm_closes;
25941 +atomic_unchecked_t cm_connecteds;
25942 +atomic_unchecked_t cm_connect_reqs;
25943 +atomic_unchecked_t cm_rejects;
25944
25945
25946 /**
25947 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25948 kfree(listener);
25949 listener = NULL;
25950 ret = 0;
25951 - atomic_inc(&cm_listens_destroyed);
25952 + atomic_inc_unchecked(&cm_listens_destroyed);
25953 } else {
25954 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25955 }
25956 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25957 cm_node->rem_mac);
25958
25959 add_hte_node(cm_core, cm_node);
25960 - atomic_inc(&cm_nodes_created);
25961 + atomic_inc_unchecked(&cm_nodes_created);
25962
25963 return cm_node;
25964 }
25965 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
25966 }
25967
25968 atomic_dec(&cm_core->node_cnt);
25969 - atomic_inc(&cm_nodes_destroyed);
25970 + atomic_inc_unchecked(&cm_nodes_destroyed);
25971 nesqp = cm_node->nesqp;
25972 if (nesqp) {
25973 nesqp->cm_node = NULL;
25974 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
25975
25976 static void drop_packet(struct sk_buff *skb)
25977 {
25978 - atomic_inc(&cm_accel_dropped_pkts);
25979 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
25980 dev_kfree_skb_any(skb);
25981 }
25982
25983 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
25984 {
25985
25986 int reset = 0; /* whether to send reset in case of err.. */
25987 - atomic_inc(&cm_resets_recvd);
25988 + atomic_inc_unchecked(&cm_resets_recvd);
25989 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
25990 " refcnt=%d\n", cm_node, cm_node->state,
25991 atomic_read(&cm_node->ref_count));
25992 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
25993 rem_ref_cm_node(cm_node->cm_core, cm_node);
25994 return NULL;
25995 }
25996 - atomic_inc(&cm_loopbacks);
25997 + atomic_inc_unchecked(&cm_loopbacks);
25998 loopbackremotenode->loopbackpartner = cm_node;
25999 loopbackremotenode->tcp_cntxt.rcv_wscale =
26000 NES_CM_DEFAULT_RCV_WND_SCALE;
26001 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26002 add_ref_cm_node(cm_node);
26003 } else if (cm_node->state == NES_CM_STATE_TSA) {
26004 rem_ref_cm_node(cm_core, cm_node);
26005 - atomic_inc(&cm_accel_dropped_pkts);
26006 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26007 dev_kfree_skb_any(skb);
26008 break;
26009 }
26010 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26011
26012 if ((cm_id) && (cm_id->event_handler)) {
26013 if (issue_disconn) {
26014 - atomic_inc(&cm_disconnects);
26015 + atomic_inc_unchecked(&cm_disconnects);
26016 cm_event.event = IW_CM_EVENT_DISCONNECT;
26017 cm_event.status = disconn_status;
26018 cm_event.local_addr = cm_id->local_addr;
26019 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26020 }
26021
26022 if (issue_close) {
26023 - atomic_inc(&cm_closes);
26024 + atomic_inc_unchecked(&cm_closes);
26025 nes_disconnect(nesqp, 1);
26026
26027 cm_id->provider_data = nesqp;
26028 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26029
26030 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26031 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26032 - atomic_inc(&cm_accepts);
26033 + atomic_inc_unchecked(&cm_accepts);
26034
26035 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26036 netdev_refcnt_read(nesvnic->netdev));
26037 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26038
26039 struct nes_cm_core *cm_core;
26040
26041 - atomic_inc(&cm_rejects);
26042 + atomic_inc_unchecked(&cm_rejects);
26043 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26044 loopback = cm_node->loopbackpartner;
26045 cm_core = cm_node->cm_core;
26046 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26047 ntohl(cm_id->local_addr.sin_addr.s_addr),
26048 ntohs(cm_id->local_addr.sin_port));
26049
26050 - atomic_inc(&cm_connects);
26051 + atomic_inc_unchecked(&cm_connects);
26052 nesqp->active_conn = 1;
26053
26054 /* cache the cm_id in the qp */
26055 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26056 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26057 return err;
26058 }
26059 - atomic_inc(&cm_listens_created);
26060 + atomic_inc_unchecked(&cm_listens_created);
26061 }
26062
26063 cm_id->add_ref(cm_id);
26064 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26065 if (nesqp->destroyed) {
26066 return;
26067 }
26068 - atomic_inc(&cm_connecteds);
26069 + atomic_inc_unchecked(&cm_connecteds);
26070 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26071 " local port 0x%04X. jiffies = %lu.\n",
26072 nesqp->hwqp.qp_id,
26073 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26074
26075 cm_id->add_ref(cm_id);
26076 ret = cm_id->event_handler(cm_id, &cm_event);
26077 - atomic_inc(&cm_closes);
26078 + atomic_inc_unchecked(&cm_closes);
26079 cm_event.event = IW_CM_EVENT_CLOSE;
26080 cm_event.status = IW_CM_EVENT_STATUS_OK;
26081 cm_event.provider_data = cm_id->provider_data;
26082 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26083 return;
26084 cm_id = cm_node->cm_id;
26085
26086 - atomic_inc(&cm_connect_reqs);
26087 + atomic_inc_unchecked(&cm_connect_reqs);
26088 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26089 cm_node, cm_id, jiffies);
26090
26091 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26092 return;
26093 cm_id = cm_node->cm_id;
26094
26095 - atomic_inc(&cm_connect_reqs);
26096 + atomic_inc_unchecked(&cm_connect_reqs);
26097 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26098 cm_node, cm_id, jiffies);
26099
26100 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h
26101 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400
26102 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-08-05 19:44:36.000000000 -0400
26103 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26104 extern unsigned int wqm_quanta;
26105 extern struct list_head nes_adapter_list;
26106
26107 -extern atomic_t cm_connects;
26108 -extern atomic_t cm_accepts;
26109 -extern atomic_t cm_disconnects;
26110 -extern atomic_t cm_closes;
26111 -extern atomic_t cm_connecteds;
26112 -extern atomic_t cm_connect_reqs;
26113 -extern atomic_t cm_rejects;
26114 -extern atomic_t mod_qp_timouts;
26115 -extern atomic_t qps_created;
26116 -extern atomic_t qps_destroyed;
26117 -extern atomic_t sw_qps_destroyed;
26118 +extern atomic_unchecked_t cm_connects;
26119 +extern atomic_unchecked_t cm_accepts;
26120 +extern atomic_unchecked_t cm_disconnects;
26121 +extern atomic_unchecked_t cm_closes;
26122 +extern atomic_unchecked_t cm_connecteds;
26123 +extern atomic_unchecked_t cm_connect_reqs;
26124 +extern atomic_unchecked_t cm_rejects;
26125 +extern atomic_unchecked_t mod_qp_timouts;
26126 +extern atomic_unchecked_t qps_created;
26127 +extern atomic_unchecked_t qps_destroyed;
26128 +extern atomic_unchecked_t sw_qps_destroyed;
26129 extern u32 mh_detected;
26130 extern u32 mh_pauses_sent;
26131 extern u32 cm_packets_sent;
26132 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26133 extern u32 cm_packets_received;
26134 extern u32 cm_packets_dropped;
26135 extern u32 cm_packets_retrans;
26136 -extern atomic_t cm_listens_created;
26137 -extern atomic_t cm_listens_destroyed;
26138 +extern atomic_unchecked_t cm_listens_created;
26139 +extern atomic_unchecked_t cm_listens_destroyed;
26140 extern u32 cm_backlog_drops;
26141 -extern atomic_t cm_loopbacks;
26142 -extern atomic_t cm_nodes_created;
26143 -extern atomic_t cm_nodes_destroyed;
26144 -extern atomic_t cm_accel_dropped_pkts;
26145 -extern atomic_t cm_resets_recvd;
26146 +extern atomic_unchecked_t cm_loopbacks;
26147 +extern atomic_unchecked_t cm_nodes_created;
26148 +extern atomic_unchecked_t cm_nodes_destroyed;
26149 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26150 +extern atomic_unchecked_t cm_resets_recvd;
26151
26152 extern u32 int_mod_timer_init;
26153 extern u32 int_mod_cq_depth_256;
26154 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c
26155 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-05-19 00:06:34.000000000 -0400
26156 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-05 19:44:36.000000000 -0400
26157 @@ -1302,31 +1302,31 @@ static void nes_netdev_get_ethtool_stats
26158 target_stat_values[++index] = mh_detected;
26159 target_stat_values[++index] = mh_pauses_sent;
26160 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26161 - target_stat_values[++index] = atomic_read(&cm_connects);
26162 - target_stat_values[++index] = atomic_read(&cm_accepts);
26163 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26164 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26165 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26166 - target_stat_values[++index] = atomic_read(&cm_rejects);
26167 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26168 - target_stat_values[++index] = atomic_read(&qps_created);
26169 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26170 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26171 - target_stat_values[++index] = atomic_read(&cm_closes);
26172 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26173 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26174 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26175 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26176 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26177 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26178 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26179 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26180 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26181 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26182 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26183 target_stat_values[++index] = cm_packets_sent;
26184 target_stat_values[++index] = cm_packets_bounced;
26185 target_stat_values[++index] = cm_packets_created;
26186 target_stat_values[++index] = cm_packets_received;
26187 target_stat_values[++index] = cm_packets_dropped;
26188 target_stat_values[++index] = cm_packets_retrans;
26189 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26190 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26191 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26192 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26193 target_stat_values[++index] = cm_backlog_drops;
26194 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26195 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26196 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26197 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26198 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26199 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26200 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26201 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26202 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26203 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26204 target_stat_values[++index] = nesadapter->free_4kpbl;
26205 target_stat_values[++index] = nesadapter->free_256pbl;
26206 target_stat_values[++index] = int_mod_timer_init;
26207 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c
26208 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-19 00:06:34.000000000 -0400
26209 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-05 19:44:36.000000000 -0400
26210 @@ -46,9 +46,9 @@
26211
26212 #include <rdma/ib_umem.h>
26213
26214 -atomic_t mod_qp_timouts;
26215 -atomic_t qps_created;
26216 -atomic_t sw_qps_destroyed;
26217 +atomic_unchecked_t mod_qp_timouts;
26218 +atomic_unchecked_t qps_created;
26219 +atomic_unchecked_t sw_qps_destroyed;
26220
26221 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26222
26223 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26224 if (init_attr->create_flags)
26225 return ERR_PTR(-EINVAL);
26226
26227 - atomic_inc(&qps_created);
26228 + atomic_inc_unchecked(&qps_created);
26229 switch (init_attr->qp_type) {
26230 case IB_QPT_RC:
26231 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26232 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26233 struct iw_cm_event cm_event;
26234 int ret;
26235
26236 - atomic_inc(&sw_qps_destroyed);
26237 + atomic_inc_unchecked(&sw_qps_destroyed);
26238 nesqp->destroyed = 1;
26239
26240 /* Blow away the connection if it exists. */
26241 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h
26242 --- linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-05-19 00:06:34.000000000 -0400
26243 +++ linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-08-05 20:34:06.000000000 -0400
26244 @@ -51,6 +51,7 @@
26245 #include <linux/completion.h>
26246 #include <linux/kref.h>
26247 #include <linux/sched.h>
26248 +#include <linux/slab.h>
26249
26250 #include "qib_common.h"
26251 #include "qib_verbs.h"
26252 diff -urNp linux-2.6.39.4/drivers/input/gameport/gameport.c linux-2.6.39.4/drivers/input/gameport/gameport.c
26253 --- linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-05-19 00:06:34.000000000 -0400
26254 +++ linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-08-05 19:44:37.000000000 -0400
26255 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26256 */
26257 static void gameport_init_port(struct gameport *gameport)
26258 {
26259 - static atomic_t gameport_no = ATOMIC_INIT(0);
26260 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26261
26262 __module_get(THIS_MODULE);
26263
26264 mutex_init(&gameport->drv_mutex);
26265 device_initialize(&gameport->dev);
26266 dev_set_name(&gameport->dev, "gameport%lu",
26267 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26268 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26269 gameport->dev.bus = &gameport_bus;
26270 gameport->dev.release = gameport_release_port;
26271 if (gameport->parent)
26272 diff -urNp linux-2.6.39.4/drivers/input/input.c linux-2.6.39.4/drivers/input/input.c
26273 --- linux-2.6.39.4/drivers/input/input.c 2011-07-09 09:18:51.000000000 -0400
26274 +++ linux-2.6.39.4/drivers/input/input.c 2011-08-05 19:44:37.000000000 -0400
26275 @@ -1815,7 +1815,7 @@ static void input_cleanse_bitmasks(struc
26276 */
26277 int input_register_device(struct input_dev *dev)
26278 {
26279 - static atomic_t input_no = ATOMIC_INIT(0);
26280 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26281 struct input_handler *handler;
26282 const char *path;
26283 int error;
26284 @@ -1852,7 +1852,7 @@ int input_register_device(struct input_d
26285 dev->setkeycode = input_default_setkeycode;
26286
26287 dev_set_name(&dev->dev, "input%ld",
26288 - (unsigned long) atomic_inc_return(&input_no) - 1);
26289 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26290
26291 error = device_add(&dev->dev);
26292 if (error)
26293 diff -urNp linux-2.6.39.4/drivers/input/joystick/sidewinder.c linux-2.6.39.4/drivers/input/joystick/sidewinder.c
26294 --- linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-05-19 00:06:34.000000000 -0400
26295 +++ linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-08-05 19:44:37.000000000 -0400
26296 @@ -30,6 +30,7 @@
26297 #include <linux/kernel.h>
26298 #include <linux/module.h>
26299 #include <linux/slab.h>
26300 +#include <linux/sched.h>
26301 #include <linux/init.h>
26302 #include <linux/input.h>
26303 #include <linux/gameport.h>
26304 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26305 unsigned char buf[SW_LENGTH];
26306 int i;
26307
26308 + pax_track_stack();
26309 +
26310 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26311
26312 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26313 diff -urNp linux-2.6.39.4/drivers/input/joystick/xpad.c linux-2.6.39.4/drivers/input/joystick/xpad.c
26314 --- linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-05-19 00:06:34.000000000 -0400
26315 +++ linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-08-05 19:44:37.000000000 -0400
26316 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26317
26318 static int xpad_led_probe(struct usb_xpad *xpad)
26319 {
26320 - static atomic_t led_seq = ATOMIC_INIT(0);
26321 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26322 long led_no;
26323 struct xpad_led *led;
26324 struct led_classdev *led_cdev;
26325 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26326 if (!led)
26327 return -ENOMEM;
26328
26329 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26330 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26331
26332 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26333 led->xpad = xpad;
26334 diff -urNp linux-2.6.39.4/drivers/input/mousedev.c linux-2.6.39.4/drivers/input/mousedev.c
26335 --- linux-2.6.39.4/drivers/input/mousedev.c 2011-07-09 09:18:51.000000000 -0400
26336 +++ linux-2.6.39.4/drivers/input/mousedev.c 2011-08-05 19:44:37.000000000 -0400
26337 @@ -764,7 +764,7 @@ static ssize_t mousedev_read(struct file
26338
26339 spin_unlock_irq(&client->packet_lock);
26340
26341 - if (copy_to_user(buffer, data, count))
26342 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26343 return -EFAULT;
26344
26345 return count;
26346 diff -urNp linux-2.6.39.4/drivers/input/serio/serio.c linux-2.6.39.4/drivers/input/serio/serio.c
26347 --- linux-2.6.39.4/drivers/input/serio/serio.c 2011-05-19 00:06:34.000000000 -0400
26348 +++ linux-2.6.39.4/drivers/input/serio/serio.c 2011-08-05 19:44:37.000000000 -0400
26349 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26350 */
26351 static void serio_init_port(struct serio *serio)
26352 {
26353 - static atomic_t serio_no = ATOMIC_INIT(0);
26354 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26355
26356 __module_get(THIS_MODULE);
26357
26358 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26359 mutex_init(&serio->drv_mutex);
26360 device_initialize(&serio->dev);
26361 dev_set_name(&serio->dev, "serio%ld",
26362 - (long)atomic_inc_return(&serio_no) - 1);
26363 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26364 serio->dev.bus = &serio_bus;
26365 serio->dev.release = serio_release_port;
26366 serio->dev.groups = serio_device_attr_groups;
26367 diff -urNp linux-2.6.39.4/drivers/isdn/capi/capi.c linux-2.6.39.4/drivers/isdn/capi/capi.c
26368 --- linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-05-19 00:06:34.000000000 -0400
26369 +++ linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-08-05 19:44:37.000000000 -0400
26370 @@ -89,8 +89,8 @@ struct capiminor {
26371
26372 struct capi20_appl *ap;
26373 u32 ncci;
26374 - atomic_t datahandle;
26375 - atomic_t msgid;
26376 + atomic_unchecked_t datahandle;
26377 + atomic_unchecked_t msgid;
26378
26379 struct tty_port port;
26380 int ttyinstop;
26381 @@ -414,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *m
26382 capimsg_setu16(s, 2, mp->ap->applid);
26383 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26384 capimsg_setu8 (s, 5, CAPI_RESP);
26385 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26386 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26387 capimsg_setu32(s, 8, mp->ncci);
26388 capimsg_setu16(s, 12, datahandle);
26389 }
26390 @@ -547,14 +547,14 @@ static void handle_minor_send(struct cap
26391 mp->outbytes -= len;
26392 spin_unlock_bh(&mp->outlock);
26393
26394 - datahandle = atomic_inc_return(&mp->datahandle);
26395 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26396 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26397 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26398 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26399 capimsg_setu16(skb->data, 2, mp->ap->applid);
26400 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26401 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26402 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26403 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26404 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26405 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26406 capimsg_setu16(skb->data, 16, len); /* Data length */
26407 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/common.c linux-2.6.39.4/drivers/isdn/gigaset/common.c
26408 --- linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-05-19 00:06:34.000000000 -0400
26409 +++ linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-08-05 19:44:37.000000000 -0400
26410 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26411 cs->commands_pending = 0;
26412 cs->cur_at_seq = 0;
26413 cs->gotfwver = -1;
26414 - cs->open_count = 0;
26415 + local_set(&cs->open_count, 0);
26416 cs->dev = NULL;
26417 cs->tty = NULL;
26418 cs->tty_dev = NULL;
26419 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h
26420 --- linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-05-19 00:06:34.000000000 -0400
26421 +++ linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-08-05 19:44:37.000000000 -0400
26422 @@ -35,6 +35,7 @@
26423 #include <linux/tty_driver.h>
26424 #include <linux/list.h>
26425 #include <asm/atomic.h>
26426 +#include <asm/local.h>
26427
26428 #define GIG_VERSION {0, 5, 0, 0}
26429 #define GIG_COMPAT {0, 4, 0, 0}
26430 @@ -433,7 +434,7 @@ struct cardstate {
26431 spinlock_t cmdlock;
26432 unsigned curlen, cmdbytes;
26433
26434 - unsigned open_count;
26435 + local_t open_count;
26436 struct tty_struct *tty;
26437 struct tasklet_struct if_wake_tasklet;
26438 unsigned control_state;
26439 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/interface.c linux-2.6.39.4/drivers/isdn/gigaset/interface.c
26440 --- linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-05-19 00:06:34.000000000 -0400
26441 +++ linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-08-05 19:44:37.000000000 -0400
26442 @@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
26443 return -ERESTARTSYS;
26444 tty->driver_data = cs;
26445
26446 - ++cs->open_count;
26447 -
26448 - if (cs->open_count == 1) {
26449 + if (local_inc_return(&cs->open_count) == 1) {
26450 spin_lock_irqsave(&cs->lock, flags);
26451 cs->tty = tty;
26452 spin_unlock_irqrestore(&cs->lock, flags);
26453 @@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
26454
26455 if (!cs->connected)
26456 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26457 - else if (!cs->open_count)
26458 + else if (!local_read(&cs->open_count))
26459 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26460 else {
26461 - if (!--cs->open_count) {
26462 + if (!local_dec_return(&cs->open_count)) {
26463 spin_lock_irqsave(&cs->lock, flags);
26464 cs->tty = NULL;
26465 spin_unlock_irqrestore(&cs->lock, flags);
26466 @@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
26467 if (!cs->connected) {
26468 gig_dbg(DEBUG_IF, "not connected");
26469 retval = -ENODEV;
26470 - } else if (!cs->open_count)
26471 + } else if (!local_read(&cs->open_count))
26472 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26473 else {
26474 retval = 0;
26475 @@ -358,7 +356,7 @@ static int if_write(struct tty_struct *t
26476 retval = -ENODEV;
26477 goto done;
26478 }
26479 - if (!cs->open_count) {
26480 + if (!local_read(&cs->open_count)) {
26481 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26482 retval = -ENODEV;
26483 goto done;
26484 @@ -411,7 +409,7 @@ static int if_write_room(struct tty_stru
26485 if (!cs->connected) {
26486 gig_dbg(DEBUG_IF, "not connected");
26487 retval = -ENODEV;
26488 - } else if (!cs->open_count)
26489 + } else if (!local_read(&cs->open_count))
26490 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26491 else if (cs->mstate != MS_LOCKED) {
26492 dev_warn(cs->dev, "can't write to unlocked device\n");
26493 @@ -441,7 +439,7 @@ static int if_chars_in_buffer(struct tty
26494
26495 if (!cs->connected)
26496 gig_dbg(DEBUG_IF, "not connected");
26497 - else if (!cs->open_count)
26498 + else if (!local_read(&cs->open_count))
26499 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26500 else if (cs->mstate != MS_LOCKED)
26501 dev_warn(cs->dev, "can't write to unlocked device\n");
26502 @@ -469,7 +467,7 @@ static void if_throttle(struct tty_struc
26503
26504 if (!cs->connected)
26505 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26506 - else if (!cs->open_count)
26507 + else if (!local_read(&cs->open_count))
26508 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26509 else
26510 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26511 @@ -493,7 +491,7 @@ static void if_unthrottle(struct tty_str
26512
26513 if (!cs->connected)
26514 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26515 - else if (!cs->open_count)
26516 + else if (!local_read(&cs->open_count))
26517 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26518 else
26519 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26520 @@ -524,7 +522,7 @@ static void if_set_termios(struct tty_st
26521 goto out;
26522 }
26523
26524 - if (!cs->open_count) {
26525 + if (!local_read(&cs->open_count)) {
26526 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26527 goto out;
26528 }
26529 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c
26530 --- linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-05-19 00:06:34.000000000 -0400
26531 +++ linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-08-05 19:44:37.000000000 -0400
26532 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26533 }
26534 if (left) {
26535 if (t4file->user) {
26536 - if (copy_from_user(buf, dp, left))
26537 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26538 return -EFAULT;
26539 } else {
26540 memcpy(buf, dp, left);
26541 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26542 }
26543 if (left) {
26544 if (config->user) {
26545 - if (copy_from_user(buf, dp, left))
26546 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26547 return -EFAULT;
26548 } else {
26549 memcpy(buf, dp, left);
26550 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c
26551 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-19 00:06:34.000000000 -0400
26552 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-05 19:44:37.000000000 -0400
26553 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26554 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26555 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26556
26557 + pax_track_stack();
26558
26559 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26560 {
26561 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c
26562 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-05-19 00:06:34.000000000 -0400
26563 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-05 19:44:37.000000000 -0400
26564 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26565 IDI_SYNC_REQ req;
26566 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26567
26568 + pax_track_stack();
26569 +
26570 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26571
26572 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26573 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c
26574 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-19 00:06:34.000000000 -0400
26575 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-05 19:44:37.000000000 -0400
26576 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26577 IDI_SYNC_REQ req;
26578 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26579
26580 + pax_track_stack();
26581 +
26582 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26583
26584 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26585 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c
26586 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-19 00:06:34.000000000 -0400
26587 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-05 19:44:37.000000000 -0400
26588 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
26589 IDI_SYNC_REQ req;
26590 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26591
26592 + pax_track_stack();
26593 +
26594 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26595
26596 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26597 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h
26598 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-05-19 00:06:34.000000000 -0400
26599 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:34:06.000000000 -0400
26600 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26601 } diva_didd_add_adapter_t;
26602 typedef struct _diva_didd_remove_adapter {
26603 IDI_CALL p_request;
26604 -} diva_didd_remove_adapter_t;
26605 +} __no_const diva_didd_remove_adapter_t;
26606 typedef struct _diva_didd_read_adapter_array {
26607 void * buffer;
26608 dword length;
26609 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c
26610 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-05-19 00:06:34.000000000 -0400
26611 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-05 19:44:37.000000000 -0400
26612 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26613 IDI_SYNC_REQ req;
26614 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26615
26616 + pax_track_stack();
26617 +
26618 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26619
26620 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26621 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c
26622 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-05-19 00:06:34.000000000 -0400
26623 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-08-05 19:44:37.000000000 -0400
26624 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
26625 dword d;
26626 word w;
26627
26628 + pax_track_stack();
26629 +
26630 a = plci->adapter;
26631 Id = ((word)plci->Id<<8)|a->Id;
26632 PUT_WORD(&SS_Ind[4],0x0000);
26633 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
26634 word j, n, w;
26635 dword d;
26636
26637 + pax_track_stack();
26638 +
26639
26640 for(i=0;i<8;i++) bp_parms[i].length = 0;
26641 for(i=0;i<2;i++) global_config[i].length = 0;
26642 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
26643 const byte llc3[] = {4,3,2,2,6,6,0};
26644 const byte header[] = {0,2,3,3,0,0,0};
26645
26646 + pax_track_stack();
26647 +
26648 for(i=0;i<8;i++) bp_parms[i].length = 0;
26649 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26650 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26651 @@ -14760,6 +14766,8 @@ static void group_optimization(DIVA_CAPI
26652 word appl_number_group_type[MAX_APPL];
26653 PLCI *auxplci;
26654
26655 + pax_track_stack();
26656 +
26657 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26658
26659 if(!a->group_optimization_enabled)
26660 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c
26661 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-19 00:06:34.000000000 -0400
26662 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-05 19:44:37.000000000 -0400
26663 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26664 IDI_SYNC_REQ req;
26665 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26666
26667 + pax_track_stack();
26668 +
26669 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26670
26671 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26672 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26673 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-05-19 00:06:34.000000000 -0400
26674 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:34:06.000000000 -0400
26675 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26676 typedef struct _diva_os_idi_adapter_interface {
26677 diva_init_card_proc_t cleanup_adapter_proc;
26678 diva_cmd_card_proc_t cmd_proc;
26679 -} diva_os_idi_adapter_interface_t;
26680 +} __no_const diva_os_idi_adapter_interface_t;
26681
26682 typedef struct _diva_os_xdi_adapter {
26683 struct list_head link;
26684 diff -urNp linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c
26685 --- linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-05-19 00:06:34.000000000 -0400
26686 +++ linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-08-05 19:44:37.000000000 -0400
26687 @@ -1292,6 +1292,8 @@ isdn_ioctl(struct file *file, uint cmd,
26688 } iocpar;
26689 void __user *argp = (void __user *)arg;
26690
26691 + pax_track_stack();
26692 +
26693 #define name iocpar.name
26694 #define bname iocpar.bname
26695 #define iocts iocpar.iocts
26696 diff -urNp linux-2.6.39.4/drivers/isdn/icn/icn.c linux-2.6.39.4/drivers/isdn/icn/icn.c
26697 --- linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-05-19 00:06:34.000000000 -0400
26698 +++ linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-08-05 19:44:37.000000000 -0400
26699 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26700 if (count > len)
26701 count = len;
26702 if (user) {
26703 - if (copy_from_user(msg, buf, count))
26704 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26705 return -EFAULT;
26706 } else
26707 memcpy(msg, buf, count);
26708 diff -urNp linux-2.6.39.4/drivers/lguest/core.c linux-2.6.39.4/drivers/lguest/core.c
26709 --- linux-2.6.39.4/drivers/lguest/core.c 2011-05-19 00:06:34.000000000 -0400
26710 +++ linux-2.6.39.4/drivers/lguest/core.c 2011-08-05 19:44:37.000000000 -0400
26711 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26712 * it's worked so far. The end address needs +1 because __get_vm_area
26713 * allocates an extra guard page, so we need space for that.
26714 */
26715 +
26716 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26717 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26718 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26719 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26720 +#else
26721 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26722 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26723 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26724 +#endif
26725 +
26726 if (!switcher_vma) {
26727 err = -ENOMEM;
26728 printk("lguest: could not map switcher pages high\n");
26729 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26730 * Now the Switcher is mapped at the right address, we can't fail!
26731 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26732 */
26733 - memcpy(switcher_vma->addr, start_switcher_text,
26734 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26735 end_switcher_text - start_switcher_text);
26736
26737 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26738 diff -urNp linux-2.6.39.4/drivers/lguest/x86/core.c linux-2.6.39.4/drivers/lguest/x86/core.c
26739 --- linux-2.6.39.4/drivers/lguest/x86/core.c 2011-05-19 00:06:34.000000000 -0400
26740 +++ linux-2.6.39.4/drivers/lguest/x86/core.c 2011-08-05 19:44:37.000000000 -0400
26741 @@ -59,7 +59,7 @@ static struct {
26742 /* Offset from where switcher.S was compiled to where we've copied it */
26743 static unsigned long switcher_offset(void)
26744 {
26745 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26746 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26747 }
26748
26749 /* This cpu's struct lguest_pages. */
26750 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26751 * These copies are pretty cheap, so we do them unconditionally: */
26752 /* Save the current Host top-level page directory.
26753 */
26754 +
26755 +#ifdef CONFIG_PAX_PER_CPU_PGD
26756 + pages->state.host_cr3 = read_cr3();
26757 +#else
26758 pages->state.host_cr3 = __pa(current->mm->pgd);
26759 +#endif
26760 +
26761 /*
26762 * Set up the Guest's page tables to see this CPU's pages (and no
26763 * other CPU's pages).
26764 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26765 * compiled-in switcher code and the high-mapped copy we just made.
26766 */
26767 for (i = 0; i < IDT_ENTRIES; i++)
26768 - default_idt_entries[i] += switcher_offset();
26769 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26770
26771 /*
26772 * Set up the Switcher's per-cpu areas.
26773 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26774 * it will be undisturbed when we switch. To change %cs and jump we
26775 * need this structure to feed to Intel's "lcall" instruction.
26776 */
26777 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26778 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26779 lguest_entry.segment = LGUEST_CS;
26780
26781 /*
26782 diff -urNp linux-2.6.39.4/drivers/lguest/x86/switcher_32.S linux-2.6.39.4/drivers/lguest/x86/switcher_32.S
26783 --- linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-05-19 00:06:34.000000000 -0400
26784 +++ linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-08-05 19:44:37.000000000 -0400
26785 @@ -87,6 +87,7 @@
26786 #include <asm/page.h>
26787 #include <asm/segment.h>
26788 #include <asm/lguest.h>
26789 +#include <asm/processor-flags.h>
26790
26791 // We mark the start of the code to copy
26792 // It's placed in .text tho it's never run here
26793 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26794 // Changes type when we load it: damn Intel!
26795 // For after we switch over our page tables
26796 // That entry will be read-only: we'd crash.
26797 +
26798 +#ifdef CONFIG_PAX_KERNEXEC
26799 + mov %cr0, %edx
26800 + xor $X86_CR0_WP, %edx
26801 + mov %edx, %cr0
26802 +#endif
26803 +
26804 movl $(GDT_ENTRY_TSS*8), %edx
26805 ltr %dx
26806
26807 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26808 // Let's clear it again for our return.
26809 // The GDT descriptor of the Host
26810 // Points to the table after two "size" bytes
26811 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26812 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26813 // Clear "used" from type field (byte 5, bit 2)
26814 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26815 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26816 +
26817 +#ifdef CONFIG_PAX_KERNEXEC
26818 + mov %cr0, %eax
26819 + xor $X86_CR0_WP, %eax
26820 + mov %eax, %cr0
26821 +#endif
26822
26823 // Once our page table's switched, the Guest is live!
26824 // The Host fades as we run this final step.
26825 @@ -295,13 +309,12 @@ deliver_to_host:
26826 // I consulted gcc, and it gave
26827 // These instructions, which I gladly credit:
26828 leal (%edx,%ebx,8), %eax
26829 - movzwl (%eax),%edx
26830 - movl 4(%eax), %eax
26831 - xorw %ax, %ax
26832 - orl %eax, %edx
26833 + movl 4(%eax), %edx
26834 + movw (%eax), %dx
26835 // Now the address of the handler's in %edx
26836 // We call it now: its "iret" drops us home.
26837 - jmp *%edx
26838 + ljmp $__KERNEL_CS, $1f
26839 +1: jmp *%edx
26840
26841 // Every interrupt can come to us here
26842 // But we must truly tell each apart.
26843 diff -urNp linux-2.6.39.4/drivers/md/dm.c linux-2.6.39.4/drivers/md/dm.c
26844 --- linux-2.6.39.4/drivers/md/dm.c 2011-05-19 00:06:34.000000000 -0400
26845 +++ linux-2.6.39.4/drivers/md/dm.c 2011-08-05 19:44:37.000000000 -0400
26846 @@ -162,9 +162,9 @@ struct mapped_device {
26847 /*
26848 * Event handling.
26849 */
26850 - atomic_t event_nr;
26851 + atomic_unchecked_t event_nr;
26852 wait_queue_head_t eventq;
26853 - atomic_t uevent_seq;
26854 + atomic_unchecked_t uevent_seq;
26855 struct list_head uevent_list;
26856 spinlock_t uevent_lock; /* Protect access to uevent_list */
26857
26858 @@ -1836,8 +1836,8 @@ static struct mapped_device *alloc_dev(i
26859 rwlock_init(&md->map_lock);
26860 atomic_set(&md->holders, 1);
26861 atomic_set(&md->open_count, 0);
26862 - atomic_set(&md->event_nr, 0);
26863 - atomic_set(&md->uevent_seq, 0);
26864 + atomic_set_unchecked(&md->event_nr, 0);
26865 + atomic_set_unchecked(&md->uevent_seq, 0);
26866 INIT_LIST_HEAD(&md->uevent_list);
26867 spin_lock_init(&md->uevent_lock);
26868
26869 @@ -1971,7 +1971,7 @@ static void event_callback(void *context
26870
26871 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26872
26873 - atomic_inc(&md->event_nr);
26874 + atomic_inc_unchecked(&md->event_nr);
26875 wake_up(&md->eventq);
26876 }
26877
26878 @@ -2547,18 +2547,18 @@ int dm_kobject_uevent(struct mapped_devi
26879
26880 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26881 {
26882 - return atomic_add_return(1, &md->uevent_seq);
26883 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26884 }
26885
26886 uint32_t dm_get_event_nr(struct mapped_device *md)
26887 {
26888 - return atomic_read(&md->event_nr);
26889 + return atomic_read_unchecked(&md->event_nr);
26890 }
26891
26892 int dm_wait_event(struct mapped_device *md, int event_nr)
26893 {
26894 return wait_event_interruptible(md->eventq,
26895 - (event_nr != atomic_read(&md->event_nr)));
26896 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26897 }
26898
26899 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26900 diff -urNp linux-2.6.39.4/drivers/md/dm-ioctl.c linux-2.6.39.4/drivers/md/dm-ioctl.c
26901 --- linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-05-19 00:06:34.000000000 -0400
26902 +++ linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-08-05 19:44:37.000000000 -0400
26903 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26904 cmd == DM_LIST_VERSIONS_CMD)
26905 return 0;
26906
26907 - if ((cmd == DM_DEV_CREATE_CMD)) {
26908 + if (cmd == DM_DEV_CREATE_CMD) {
26909 if (!*param->name) {
26910 DMWARN("name not supplied when creating device");
26911 return -EINVAL;
26912 diff -urNp linux-2.6.39.4/drivers/md/dm-raid1.c linux-2.6.39.4/drivers/md/dm-raid1.c
26913 --- linux-2.6.39.4/drivers/md/dm-raid1.c 2011-05-19 00:06:34.000000000 -0400
26914 +++ linux-2.6.39.4/drivers/md/dm-raid1.c 2011-08-05 19:44:37.000000000 -0400
26915 @@ -42,7 +42,7 @@ enum dm_raid1_error {
26916
26917 struct mirror {
26918 struct mirror_set *ms;
26919 - atomic_t error_count;
26920 + atomic_unchecked_t error_count;
26921 unsigned long error_type;
26922 struct dm_dev *dev;
26923 sector_t offset;
26924 @@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(s
26925 struct mirror *m;
26926
26927 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26928 - if (!atomic_read(&m->error_count))
26929 + if (!atomic_read_unchecked(&m->error_count))
26930 return m;
26931
26932 return NULL;
26933 @@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m
26934 * simple way to tell if a device has encountered
26935 * errors.
26936 */
26937 - atomic_inc(&m->error_count);
26938 + atomic_inc_unchecked(&m->error_count);
26939
26940 if (test_and_set_bit(error_type, &m->error_type))
26941 return;
26942 @@ -410,7 +410,7 @@ static struct mirror *choose_mirror(stru
26943 struct mirror *m = get_default_mirror(ms);
26944
26945 do {
26946 - if (likely(!atomic_read(&m->error_count)))
26947 + if (likely(!atomic_read_unchecked(&m->error_count)))
26948 return m;
26949
26950 if (m-- == ms->mirror)
26951 @@ -424,7 +424,7 @@ static int default_ok(struct mirror *m)
26952 {
26953 struct mirror *default_mirror = get_default_mirror(m->ms);
26954
26955 - return !atomic_read(&default_mirror->error_count);
26956 + return !atomic_read_unchecked(&default_mirror->error_count);
26957 }
26958
26959 static int mirror_available(struct mirror_set *ms, struct bio *bio)
26960 @@ -561,7 +561,7 @@ static void do_reads(struct mirror_set *
26961 */
26962 if (likely(region_in_sync(ms, region, 1)))
26963 m = choose_mirror(ms, bio->bi_sector);
26964 - else if (m && atomic_read(&m->error_count))
26965 + else if (m && atomic_read_unchecked(&m->error_count))
26966 m = NULL;
26967
26968 if (likely(m))
26969 @@ -939,7 +939,7 @@ static int get_mirror(struct mirror_set
26970 }
26971
26972 ms->mirror[mirror].ms = ms;
26973 - atomic_set(&(ms->mirror[mirror].error_count), 0);
26974 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
26975 ms->mirror[mirror].error_type = 0;
26976 ms->mirror[mirror].offset = offset;
26977
26978 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
26979 */
26980 static char device_status_char(struct mirror *m)
26981 {
26982 - if (!atomic_read(&(m->error_count)))
26983 + if (!atomic_read_unchecked(&(m->error_count)))
26984 return 'A';
26985
26986 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
26987 diff -urNp linux-2.6.39.4/drivers/md/dm-stripe.c linux-2.6.39.4/drivers/md/dm-stripe.c
26988 --- linux-2.6.39.4/drivers/md/dm-stripe.c 2011-05-19 00:06:34.000000000 -0400
26989 +++ linux-2.6.39.4/drivers/md/dm-stripe.c 2011-08-05 19:44:37.000000000 -0400
26990 @@ -20,7 +20,7 @@ struct stripe {
26991 struct dm_dev *dev;
26992 sector_t physical_start;
26993
26994 - atomic_t error_count;
26995 + atomic_unchecked_t error_count;
26996 };
26997
26998 struct stripe_c {
26999 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27000 kfree(sc);
27001 return r;
27002 }
27003 - atomic_set(&(sc->stripe[i].error_count), 0);
27004 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27005 }
27006
27007 ti->private = sc;
27008 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27009 DMEMIT("%d ", sc->stripes);
27010 for (i = 0; i < sc->stripes; i++) {
27011 DMEMIT("%s ", sc->stripe[i].dev->name);
27012 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27013 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27014 'D' : 'A';
27015 }
27016 buffer[i] = '\0';
27017 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27018 */
27019 for (i = 0; i < sc->stripes; i++)
27020 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27021 - atomic_inc(&(sc->stripe[i].error_count));
27022 - if (atomic_read(&(sc->stripe[i].error_count)) <
27023 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
27024 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27025 DM_IO_ERROR_THRESHOLD)
27026 schedule_work(&sc->trigger_event);
27027 }
27028 diff -urNp linux-2.6.39.4/drivers/md/dm-table.c linux-2.6.39.4/drivers/md/dm-table.c
27029 --- linux-2.6.39.4/drivers/md/dm-table.c 2011-06-03 00:04:14.000000000 -0400
27030 +++ linux-2.6.39.4/drivers/md/dm-table.c 2011-08-05 19:44:37.000000000 -0400
27031 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27032 if (!dev_size)
27033 return 0;
27034
27035 - if ((start >= dev_size) || (start + len > dev_size)) {
27036 + if ((start >= dev_size) || (len > dev_size - start)) {
27037 DMWARN("%s: %s too small for target: "
27038 "start=%llu, len=%llu, dev_size=%llu",
27039 dm_device_name(ti->table->md), bdevname(bdev, b),
27040 diff -urNp linux-2.6.39.4/drivers/md/md.c linux-2.6.39.4/drivers/md/md.c
27041 --- linux-2.6.39.4/drivers/md/md.c 2011-07-09 09:18:51.000000000 -0400
27042 +++ linux-2.6.39.4/drivers/md/md.c 2011-08-05 19:44:37.000000000 -0400
27043 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27044 * start build, activate spare
27045 */
27046 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27047 -static atomic_t md_event_count;
27048 +static atomic_unchecked_t md_event_count;
27049 void md_new_event(mddev_t *mddev)
27050 {
27051 - atomic_inc(&md_event_count);
27052 + atomic_inc_unchecked(&md_event_count);
27053 wake_up(&md_event_waiters);
27054 }
27055 EXPORT_SYMBOL_GPL(md_new_event);
27056 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27057 */
27058 static void md_new_event_inintr(mddev_t *mddev)
27059 {
27060 - atomic_inc(&md_event_count);
27061 + atomic_inc_unchecked(&md_event_count);
27062 wake_up(&md_event_waiters);
27063 }
27064
27065 @@ -1454,7 +1454,7 @@ static int super_1_load(mdk_rdev_t *rdev
27066
27067 rdev->preferred_minor = 0xffff;
27068 rdev->data_offset = le64_to_cpu(sb->data_offset);
27069 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27070 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27071
27072 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27073 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27074 @@ -1632,7 +1632,7 @@ static void super_1_sync(mddev_t *mddev,
27075 else
27076 sb->resync_offset = cpu_to_le64(0);
27077
27078 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27079 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27080
27081 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27082 sb->size = cpu_to_le64(mddev->dev_sectors);
27083 @@ -2414,7 +2414,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27084 static ssize_t
27085 errors_show(mdk_rdev_t *rdev, char *page)
27086 {
27087 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27088 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27089 }
27090
27091 static ssize_t
27092 @@ -2423,7 +2423,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27093 char *e;
27094 unsigned long n = simple_strtoul(buf, &e, 10);
27095 if (*buf && (*e == 0 || *e == '\n')) {
27096 - atomic_set(&rdev->corrected_errors, n);
27097 + atomic_set_unchecked(&rdev->corrected_errors, n);
27098 return len;
27099 }
27100 return -EINVAL;
27101 @@ -2779,8 +2779,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27102 rdev->last_read_error.tv_sec = 0;
27103 rdev->last_read_error.tv_nsec = 0;
27104 atomic_set(&rdev->nr_pending, 0);
27105 - atomic_set(&rdev->read_errors, 0);
27106 - atomic_set(&rdev->corrected_errors, 0);
27107 + atomic_set_unchecked(&rdev->read_errors, 0);
27108 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27109
27110 INIT_LIST_HEAD(&rdev->same_set);
27111 init_waitqueue_head(&rdev->blocked_wait);
27112 @@ -6388,7 +6388,7 @@ static int md_seq_show(struct seq_file *
27113
27114 spin_unlock(&pers_lock);
27115 seq_printf(seq, "\n");
27116 - mi->event = atomic_read(&md_event_count);
27117 + mi->event = atomic_read_unchecked(&md_event_count);
27118 return 0;
27119 }
27120 if (v == (void*)2) {
27121 @@ -6477,7 +6477,7 @@ static int md_seq_show(struct seq_file *
27122 chunk_kb ? "KB" : "B");
27123 if (bitmap->file) {
27124 seq_printf(seq, ", file: ");
27125 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27126 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27127 }
27128
27129 seq_printf(seq, "\n");
27130 @@ -6511,7 +6511,7 @@ static int md_seq_open(struct inode *ino
27131 else {
27132 struct seq_file *p = file->private_data;
27133 p->private = mi;
27134 - mi->event = atomic_read(&md_event_count);
27135 + mi->event = atomic_read_unchecked(&md_event_count);
27136 }
27137 return error;
27138 }
27139 @@ -6527,7 +6527,7 @@ static unsigned int mdstat_poll(struct f
27140 /* always allow read */
27141 mask = POLLIN | POLLRDNORM;
27142
27143 - if (mi->event != atomic_read(&md_event_count))
27144 + if (mi->event != atomic_read_unchecked(&md_event_count))
27145 mask |= POLLERR | POLLPRI;
27146 return mask;
27147 }
27148 @@ -6571,7 +6571,7 @@ static int is_mddev_idle(mddev_t *mddev,
27149 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27150 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27151 (int)part_stat_read(&disk->part0, sectors[1]) -
27152 - atomic_read(&disk->sync_io);
27153 + atomic_read_unchecked(&disk->sync_io);
27154 /* sync IO will cause sync_io to increase before the disk_stats
27155 * as sync_io is counted when a request starts, and
27156 * disk_stats is counted when it completes.
27157 diff -urNp linux-2.6.39.4/drivers/md/md.h linux-2.6.39.4/drivers/md/md.h
27158 --- linux-2.6.39.4/drivers/md/md.h 2011-05-19 00:06:34.000000000 -0400
27159 +++ linux-2.6.39.4/drivers/md/md.h 2011-08-05 19:44:37.000000000 -0400
27160 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27161 * only maintained for arrays that
27162 * support hot removal
27163 */
27164 - atomic_t read_errors; /* number of consecutive read errors that
27165 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27166 * we have tried to ignore.
27167 */
27168 struct timespec last_read_error; /* monotonic time since our
27169 * last read error
27170 */
27171 - atomic_t corrected_errors; /* number of corrected read errors,
27172 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27173 * for reporting to userspace and storing
27174 * in superblock.
27175 */
27176 @@ -342,7 +342,7 @@ static inline void rdev_dec_pending(mdk_
27177
27178 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27179 {
27180 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27181 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27182 }
27183
27184 struct mdk_personality
27185 diff -urNp linux-2.6.39.4/drivers/md/raid10.c linux-2.6.39.4/drivers/md/raid10.c
27186 --- linux-2.6.39.4/drivers/md/raid10.c 2011-05-19 00:06:34.000000000 -0400
27187 +++ linux-2.6.39.4/drivers/md/raid10.c 2011-08-05 19:44:37.000000000 -0400
27188 @@ -1209,7 +1209,7 @@ static void end_sync_read(struct bio *bi
27189 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27190 set_bit(R10BIO_Uptodate, &r10_bio->state);
27191 else {
27192 - atomic_add(r10_bio->sectors,
27193 + atomic_add_unchecked(r10_bio->sectors,
27194 &conf->mirrors[d].rdev->corrected_errors);
27195 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27196 md_error(r10_bio->mddev,
27197 @@ -1417,7 +1417,7 @@ static void check_decay_read_errors(mdde
27198 {
27199 struct timespec cur_time_mon;
27200 unsigned long hours_since_last;
27201 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27202 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27203
27204 ktime_get_ts(&cur_time_mon);
27205
27206 @@ -1439,9 +1439,9 @@ static void check_decay_read_errors(mdde
27207 * overflowing the shift of read_errors by hours_since_last.
27208 */
27209 if (hours_since_last >= 8 * sizeof(read_errors))
27210 - atomic_set(&rdev->read_errors, 0);
27211 + atomic_set_unchecked(&rdev->read_errors, 0);
27212 else
27213 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27214 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27215 }
27216
27217 /*
27218 @@ -1476,8 +1476,8 @@ static void fix_read_error(conf_t *conf,
27219 }
27220
27221 check_decay_read_errors(mddev, rdev);
27222 - atomic_inc(&rdev->read_errors);
27223 - cur_read_error_count = atomic_read(&rdev->read_errors);
27224 + atomic_inc_unchecked(&rdev->read_errors);
27225 + cur_read_error_count = atomic_read_unchecked(&rdev->read_errors);
27226 if (cur_read_error_count > max_read_errors) {
27227 rcu_read_unlock();
27228 printk(KERN_NOTICE
27229 @@ -1550,7 +1550,7 @@ static void fix_read_error(conf_t *conf,
27230 test_bit(In_sync, &rdev->flags)) {
27231 atomic_inc(&rdev->nr_pending);
27232 rcu_read_unlock();
27233 - atomic_add(s, &rdev->corrected_errors);
27234 + atomic_add_unchecked(s, &rdev->corrected_errors);
27235 if (sync_page_io(rdev,
27236 r10_bio->devs[sl].addr +
27237 sect,
27238 diff -urNp linux-2.6.39.4/drivers/md/raid1.c linux-2.6.39.4/drivers/md/raid1.c
27239 --- linux-2.6.39.4/drivers/md/raid1.c 2011-05-19 00:06:34.000000000 -0400
27240 +++ linux-2.6.39.4/drivers/md/raid1.c 2011-08-05 19:44:37.000000000 -0400
27241 @@ -1342,7 +1342,7 @@ static void sync_request_write(mddev_t *
27242 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
27243 continue;
27244 rdev = conf->mirrors[d].rdev;
27245 - atomic_add(s, &rdev->corrected_errors);
27246 + atomic_add_unchecked(s, &rdev->corrected_errors);
27247 if (sync_page_io(rdev,
27248 sect,
27249 s<<9,
27250 @@ -1488,7 +1488,7 @@ static void fix_read_error(conf_t *conf,
27251 /* Well, this device is dead */
27252 md_error(mddev, rdev);
27253 else {
27254 - atomic_add(s, &rdev->corrected_errors);
27255 + atomic_add_unchecked(s, &rdev->corrected_errors);
27256 printk(KERN_INFO
27257 "md/raid1:%s: read error corrected "
27258 "(%d sectors at %llu on %s)\n",
27259 diff -urNp linux-2.6.39.4/drivers/md/raid5.c linux-2.6.39.4/drivers/md/raid5.c
27260 --- linux-2.6.39.4/drivers/md/raid5.c 2011-06-25 12:55:22.000000000 -0400
27261 +++ linux-2.6.39.4/drivers/md/raid5.c 2011-08-05 19:44:37.000000000 -0400
27262 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27263 bi->bi_next = NULL;
27264 if ((rw & WRITE) &&
27265 test_bit(R5_ReWrite, &sh->dev[i].flags))
27266 - atomic_add(STRIPE_SECTORS,
27267 + atomic_add_unchecked(STRIPE_SECTORS,
27268 &rdev->corrected_errors);
27269 generic_make_request(bi);
27270 } else {
27271 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27272 clear_bit(R5_ReadError, &sh->dev[i].flags);
27273 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27274 }
27275 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27276 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27277 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27278 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27279 } else {
27280 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27281 int retry = 0;
27282 rdev = conf->disks[i].rdev;
27283
27284 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27285 - atomic_inc(&rdev->read_errors);
27286 + atomic_inc_unchecked(&rdev->read_errors);
27287 if (conf->mddev->degraded >= conf->max_degraded)
27288 printk_rl(KERN_WARNING
27289 "md/raid:%s: read error not correctable "
27290 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27291 (unsigned long long)(sh->sector
27292 + rdev->data_offset),
27293 bdn);
27294 - else if (atomic_read(&rdev->read_errors)
27295 + else if (atomic_read_unchecked(&rdev->read_errors)
27296 > conf->max_nr_stripes)
27297 printk(KERN_WARNING
27298 "md/raid:%s: Too many read errors, failing device %s.\n",
27299 @@ -1947,6 +1947,7 @@ static sector_t compute_blocknr(struct s
27300 sector_t r_sector;
27301 struct stripe_head sh2;
27302
27303 + pax_track_stack();
27304
27305 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27306 stripe = new_sector;
27307 diff -urNp linux-2.6.39.4/drivers/media/common/saa7146_hlp.c linux-2.6.39.4/drivers/media/common/saa7146_hlp.c
27308 --- linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-05-19 00:06:34.000000000 -0400
27309 +++ linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-08-05 19:44:37.000000000 -0400
27310 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27311
27312 int x[32], y[32], w[32], h[32];
27313
27314 + pax_track_stack();
27315 +
27316 /* clear out memory */
27317 memset(&line_list[0], 0x00, sizeof(u32)*32);
27318 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27319 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27320 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-19 00:06:34.000000000 -0400
27321 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-05 19:44:37.000000000 -0400
27322 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27323 u8 buf[HOST_LINK_BUF_SIZE];
27324 int i;
27325
27326 + pax_track_stack();
27327 +
27328 dprintk("%s\n", __func__);
27329
27330 /* check if we have space for a link buf in the rx_buffer */
27331 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27332 unsigned long timeout;
27333 int written;
27334
27335 + pax_track_stack();
27336 +
27337 dprintk("%s\n", __func__);
27338
27339 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27340 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h
27341 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-05-19 00:06:34.000000000 -0400
27342 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:34:06.000000000 -0400
27343 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
27344 union {
27345 dmx_ts_cb ts;
27346 dmx_section_cb sec;
27347 - } cb;
27348 + } __no_const cb;
27349
27350 struct dvb_demux *demux;
27351 void *priv;
27352 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c
27353 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-19 00:06:34.000000000 -0400
27354 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:34:06.000000000 -0400
27355 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27356 const struct dvb_device *template, void *priv, int type)
27357 {
27358 struct dvb_device *dvbdev;
27359 - struct file_operations *dvbdevfops;
27360 + file_operations_no_const *dvbdevfops;
27361 struct device *clsdev;
27362 int minor;
27363 int id;
27364 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c
27365 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-05-19 00:06:34.000000000 -0400
27366 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:34:06.000000000 -0400
27367 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27368 struct dib0700_adapter_state {
27369 int (*set_param_save) (struct dvb_frontend *,
27370 struct dvb_frontend_parameters *);
27371 -};
27372 +} __no_const;
27373
27374 static int dib7070_set_param_override(struct dvb_frontend *fe,
27375 struct dvb_frontend_parameters *fep)
27376 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27377 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-19 00:06:34.000000000 -0400
27378 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-05 19:44:37.000000000 -0400
27379 @@ -391,6 +391,8 @@ int dib0700_download_firmware(struct usb
27380
27381 u8 buf[260];
27382
27383 + pax_track_stack();
27384 +
27385 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27386 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27387 hx.addr, hx.len, hx.chk);
27388 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c
27389 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-05-19 00:06:34.000000000 -0400
27390 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-05 20:34:06.000000000 -0400
27391 @@ -95,7 +95,7 @@ struct su3000_state {
27392
27393 struct s6x0_state {
27394 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27395 -};
27396 +} __no_const;
27397
27398 /* debug */
27399 static int dvb_usb_dw2102_debug;
27400 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c
27401 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-19 00:06:34.000000000 -0400
27402 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-05 19:44:37.000000000 -0400
27403 @@ -663,6 +663,7 @@ static int lme2510_download_firmware(str
27404 packet_size = 0x31;
27405 len_in = 1;
27406
27407 + pax_track_stack();
27408
27409 info("FRM Starting Firmware Download");
27410
27411 @@ -715,6 +716,8 @@ static void lme_coldreset(struct usb_dev
27412 int ret = 0, len_in;
27413 u8 data[512] = {0};
27414
27415 + pax_track_stack();
27416 +
27417 data[0] = 0x0a;
27418 len_in = 1;
27419 info("FRM Firmware Cold Reset");
27420 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h
27421 --- linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-05-19 00:06:34.000000000 -0400
27422 +++ linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:34:06.000000000 -0400
27423 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
27424 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
27425 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27426 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27427 -};
27428 +} __no_const;
27429
27430 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27431 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27432 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c
27433 --- linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-05-19 00:06:34.000000000 -0400
27434 +++ linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-05 19:44:37.000000000 -0400
27435 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27436 int ret = -1;
27437 int sync;
27438
27439 + pax_track_stack();
27440 +
27441 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27442
27443 fcp = 3000;
27444 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c
27445 --- linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-05-19 00:06:34.000000000 -0400
27446 +++ linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-08-05 19:44:37.000000000 -0400
27447 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27448 u8 tudata[585];
27449 int i;
27450
27451 + pax_track_stack();
27452 +
27453 dprintk("Firmware is %zd bytes\n",fw->size);
27454
27455 /* Get eprom data */
27456 diff -urNp linux-2.6.39.4/drivers/media/radio/radio-cadet.c linux-2.6.39.4/drivers/media/radio/radio-cadet.c
27457 --- linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-05-19 00:06:34.000000000 -0400
27458 +++ linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-08-05 19:44:37.000000000 -0400
27459 @@ -349,7 +349,7 @@ static ssize_t cadet_read(struct file *f
27460 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
27461 mutex_unlock(&dev->lock);
27462
27463 - if (copy_to_user(data, readbuf, i))
27464 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
27465 return -EFAULT;
27466 return i;
27467 }
27468 diff -urNp linux-2.6.39.4/drivers/media/rc/rc-main.c linux-2.6.39.4/drivers/media/rc/rc-main.c
27469 --- linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-05-19 00:06:34.000000000 -0400
27470 +++ linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-08-05 19:44:37.000000000 -0400
27471 @@ -996,7 +996,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
27472
27473 int rc_register_device(struct rc_dev *dev)
27474 {
27475 - static atomic_t devno = ATOMIC_INIT(0);
27476 + static atomic_unchecked_t devno = ATOMIC_INIT(0);
27477 struct rc_map *rc_map;
27478 const char *path;
27479 int rc;
27480 @@ -1019,7 +1019,7 @@ int rc_register_device(struct rc_dev *de
27481 if (dev->close)
27482 dev->input_dev->close = ir_close;
27483
27484 - dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
27485 + dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
27486 dev_set_name(&dev->dev, "rc%ld", dev->devno);
27487 dev_set_drvdata(&dev->dev, dev);
27488 rc = device_add(&dev->dev);
27489 diff -urNp linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c
27490 --- linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-05-19 00:06:34.000000000 -0400
27491 +++ linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-08-05 19:44:37.000000000 -0400
27492 @@ -61,7 +61,7 @@ static struct pci_device_id cx18_pci_tbl
27493
27494 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
27495
27496 -static atomic_t cx18_instance = ATOMIC_INIT(0);
27497 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
27498
27499 /* Parameter declarations */
27500 static int cardtype[CX18_MAX_CARDS];
27501 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27502 struct i2c_client c;
27503 u8 eedata[256];
27504
27505 + pax_track_stack();
27506 +
27507 memset(&c, 0, sizeof(c));
27508 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27509 c.adapter = &cx->i2c_adap[0];
27510 @@ -892,7 +894,7 @@ static int __devinit cx18_probe(struct p
27511 struct cx18 *cx;
27512
27513 /* FIXME - module parameter arrays constrain max instances */
27514 - i = atomic_inc_return(&cx18_instance) - 1;
27515 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
27516 if (i >= CX18_MAX_CARDS) {
27517 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
27518 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
27519 diff -urNp linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c
27520 --- linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-05-19 00:06:34.000000000 -0400
27521 +++ linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-05 19:44:37.000000000 -0400
27522 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27523 bool handle = false;
27524 struct ir_raw_event ir_core_event[64];
27525
27526 + pax_track_stack();
27527 +
27528 do {
27529 num = 0;
27530 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27531 diff -urNp linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c
27532 --- linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-05-19 00:06:34.000000000 -0400
27533 +++ linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-08-05 19:44:37.000000000 -0400
27534 @@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl
27535 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
27536
27537 /* ivtv instance counter */
27538 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
27539 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
27540
27541 /* Parameter declarations */
27542 static int cardtype[IVTV_MAX_CARDS];
27543 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.c linux-2.6.39.4/drivers/media/video/omap24xxcam.c
27544 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-05-19 00:06:34.000000000 -0400
27545 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-08-05 19:44:37.000000000 -0400
27546 @@ -403,7 +403,7 @@ static void omap24xxcam_vbq_complete(str
27547 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
27548
27549 do_gettimeofday(&vb->ts);
27550 - vb->field_count = atomic_add_return(2, &fh->field_count);
27551 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
27552 if (csr & csr_error) {
27553 vb->state = VIDEOBUF_ERROR;
27554 if (!atomic_read(&fh->cam->in_reset)) {
27555 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.h linux-2.6.39.4/drivers/media/video/omap24xxcam.h
27556 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-05-19 00:06:34.000000000 -0400
27557 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-08-05 19:44:37.000000000 -0400
27558 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
27559 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
27560 struct videobuf_queue vbq;
27561 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
27562 - atomic_t field_count; /* field counter for videobuf_buffer */
27563 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
27564 /* accessing cam here doesn't need serialisation: it's constant */
27565 struct omap24xxcam_device *cam;
27566 };
27567 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27568 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-19 00:06:34.000000000 -0400
27569 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-05 19:44:37.000000000 -0400
27570 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27571 u8 *eeprom;
27572 struct tveeprom tvdata;
27573
27574 + pax_track_stack();
27575 +
27576 memset(&tvdata,0,sizeof(tvdata));
27577
27578 eeprom = pvr2_eeprom_fetch(hdw);
27579 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
27580 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-05-19 00:06:34.000000000 -0400
27581 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-05 20:34:06.000000000 -0400
27582 @@ -196,7 +196,7 @@ struct pvr2_hdw {
27583
27584 /* I2C stuff */
27585 struct i2c_adapter i2c_adap;
27586 - struct i2c_algorithm i2c_algo;
27587 + i2c_algorithm_no_const i2c_algo;
27588 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
27589 int i2c_cx25840_hack_state;
27590 int i2c_linked;
27591 diff -urNp linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c
27592 --- linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-05-19 00:06:34.000000000 -0400
27593 +++ linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-05 19:44:37.000000000 -0400
27594 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27595 unsigned char localPAT[256];
27596 unsigned char localPMT[256];
27597
27598 + pax_track_stack();
27599 +
27600 /* Set video format - must be done first as it resets other settings */
27601 set_reg8(client, 0x41, h->video_format);
27602
27603 diff -urNp linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c
27604 --- linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-19 00:06:34.000000000 -0400
27605 +++ linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-05 19:44:37.000000000 -0400
27606 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27607 u8 tmp[512];
27608 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27609
27610 + pax_track_stack();
27611 +
27612 /* While any outstand message on the bus exists... */
27613 do {
27614
27615 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27616 u8 tmp[512];
27617 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27618
27619 + pax_track_stack();
27620 +
27621 while (loop) {
27622
27623 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27624 diff -urNp linux-2.6.39.4/drivers/media/video/timblogiw.c linux-2.6.39.4/drivers/media/video/timblogiw.c
27625 --- linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-05-19 00:06:34.000000000 -0400
27626 +++ linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-08-05 20:34:06.000000000 -0400
27627 @@ -746,7 +746,7 @@ static int timblogiw_mmap(struct file *f
27628
27629 /* Platform device functions */
27630
27631 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27632 +static __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27633 .vidioc_querycap = timblogiw_querycap,
27634 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27635 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27636 @@ -768,7 +768,7 @@ static __devinitconst struct v4l2_ioctl_
27637 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
27638 };
27639
27640 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
27641 +static __devinitdata struct v4l2_file_operations timblogiw_fops = {
27642 .owner = THIS_MODULE,
27643 .open = timblogiw_open,
27644 .release = timblogiw_close,
27645 diff -urNp linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c
27646 --- linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-05-19 00:06:34.000000000 -0400
27647 +++ linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-05 19:44:37.000000000 -0400
27648 @@ -799,6 +799,8 @@ static enum parse_state usbvision_parse_
27649 unsigned char rv, gv, bv;
27650 static unsigned char *Y, *U, *V;
27651
27652 + pax_track_stack();
27653 +
27654 frame = usbvision->cur_frame;
27655 image_size = frame->frmwidth * frame->frmheight;
27656 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27657 diff -urNp linux-2.6.39.4/drivers/media/video/v4l2-device.c linux-2.6.39.4/drivers/media/video/v4l2-device.c
27658 --- linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-05-19 00:06:34.000000000 -0400
27659 +++ linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-08-05 19:44:37.000000000 -0400
27660 @@ -71,9 +71,9 @@ int v4l2_device_put(struct v4l2_device *
27661 EXPORT_SYMBOL_GPL(v4l2_device_put);
27662
27663 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
27664 - atomic_t *instance)
27665 + atomic_unchecked_t *instance)
27666 {
27667 - int num = atomic_inc_return(instance) - 1;
27668 + int num = atomic_inc_return_unchecked(instance) - 1;
27669 int len = strlen(basename);
27670
27671 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
27672 diff -urNp linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c
27673 --- linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-05-19 00:06:34.000000000 -0400
27674 +++ linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-08-05 19:44:37.000000000 -0400
27675 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27676 {
27677 struct videobuf_queue q;
27678
27679 + pax_track_stack();
27680 +
27681 /* Required to make generic handler to call __videobuf_alloc */
27682 q.int_ops = &sg_ops;
27683
27684 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptbase.c linux-2.6.39.4/drivers/message/fusion/mptbase.c
27685 --- linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-05-19 00:06:34.000000000 -0400
27686 +++ linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-08-05 20:34:06.000000000 -0400
27687 @@ -6683,8 +6683,13 @@ static int mpt_iocinfo_proc_show(struct
27688 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27689 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27690
27691 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27692 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27693 +#else
27694 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27695 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27696 +#endif
27697 +
27698 /*
27699 * Rounding UP to nearest 4-kB boundary here...
27700 */
27701 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptsas.c linux-2.6.39.4/drivers/message/fusion/mptsas.c
27702 --- linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-05-19 00:06:34.000000000 -0400
27703 +++ linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-08-05 19:44:37.000000000 -0400
27704 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27705 return 0;
27706 }
27707
27708 +static inline void
27709 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27710 +{
27711 + if (phy_info->port_details) {
27712 + phy_info->port_details->rphy = rphy;
27713 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27714 + ioc->name, rphy));
27715 + }
27716 +
27717 + if (rphy) {
27718 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27719 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27720 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27721 + ioc->name, rphy, rphy->dev.release));
27722 + }
27723 +}
27724 +
27725 /* no mutex */
27726 static void
27727 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27728 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27729 return NULL;
27730 }
27731
27732 -static inline void
27733 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27734 -{
27735 - if (phy_info->port_details) {
27736 - phy_info->port_details->rphy = rphy;
27737 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27738 - ioc->name, rphy));
27739 - }
27740 -
27741 - if (rphy) {
27742 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27743 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27744 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27745 - ioc->name, rphy, rphy->dev.release));
27746 - }
27747 -}
27748 -
27749 static inline struct sas_port *
27750 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27751 {
27752 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptscsih.c linux-2.6.39.4/drivers/message/fusion/mptscsih.c
27753 --- linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-05-19 00:06:34.000000000 -0400
27754 +++ linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-08-05 19:44:37.000000000 -0400
27755 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27756
27757 h = shost_priv(SChost);
27758
27759 - if (h) {
27760 - if (h->info_kbuf == NULL)
27761 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27762 - return h->info_kbuf;
27763 - h->info_kbuf[0] = '\0';
27764 + if (!h)
27765 + return NULL;
27766
27767 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27768 - h->info_kbuf[size-1] = '\0';
27769 - }
27770 + if (h->info_kbuf == NULL)
27771 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27772 + return h->info_kbuf;
27773 + h->info_kbuf[0] = '\0';
27774 +
27775 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27776 + h->info_kbuf[size-1] = '\0';
27777
27778 return h->info_kbuf;
27779 }
27780 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_config.c linux-2.6.39.4/drivers/message/i2o/i2o_config.c
27781 --- linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-05-19 00:06:34.000000000 -0400
27782 +++ linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-08-05 19:44:37.000000000 -0400
27783 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27784 struct i2o_message *msg;
27785 unsigned int iop;
27786
27787 + pax_track_stack();
27788 +
27789 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27790 return -EFAULT;
27791
27792 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_proc.c linux-2.6.39.4/drivers/message/i2o/i2o_proc.c
27793 --- linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-05-19 00:06:34.000000000 -0400
27794 +++ linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-08-05 19:44:37.000000000 -0400
27795 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27796 "Array Controller Device"
27797 };
27798
27799 -static char *chtostr(u8 * chars, int n)
27800 -{
27801 - char tmp[256];
27802 - tmp[0] = 0;
27803 - return strncat(tmp, (char *)chars, n);
27804 -}
27805 -
27806 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27807 char *group)
27808 {
27809 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27810
27811 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27812 seq_printf(seq, "%-#8x", ddm_table.module_id);
27813 - seq_printf(seq, "%-29s",
27814 - chtostr(ddm_table.module_name_version, 28));
27815 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27816 seq_printf(seq, "%9d ", ddm_table.data_size);
27817 seq_printf(seq, "%8d", ddm_table.code_size);
27818
27819 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27820
27821 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27822 seq_printf(seq, "%-#8x", dst->module_id);
27823 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27824 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27825 + seq_printf(seq, "%-.28s", dst->module_name_version);
27826 + seq_printf(seq, "%-.8s", dst->date);
27827 seq_printf(seq, "%8d ", dst->module_size);
27828 seq_printf(seq, "%8d ", dst->mpb_size);
27829 seq_printf(seq, "0x%04x", dst->module_flags);
27830 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27831 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27832 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27833 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27834 - seq_printf(seq, "Vendor info : %s\n",
27835 - chtostr((u8 *) (work32 + 2), 16));
27836 - seq_printf(seq, "Product info : %s\n",
27837 - chtostr((u8 *) (work32 + 6), 16));
27838 - seq_printf(seq, "Description : %s\n",
27839 - chtostr((u8 *) (work32 + 10), 16));
27840 - seq_printf(seq, "Product rev. : %s\n",
27841 - chtostr((u8 *) (work32 + 14), 8));
27842 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27843 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27844 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27845 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27846
27847 seq_printf(seq, "Serial number : ");
27848 print_serial_number(seq, (u8 *) (work32 + 16),
27849 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27850 }
27851
27852 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27853 - seq_printf(seq, "Module name : %s\n",
27854 - chtostr(result.module_name, 24));
27855 - seq_printf(seq, "Module revision : %s\n",
27856 - chtostr(result.module_rev, 8));
27857 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27858 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27859
27860 seq_printf(seq, "Serial number : ");
27861 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27862 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27863 return 0;
27864 }
27865
27866 - seq_printf(seq, "Device name : %s\n",
27867 - chtostr(result.device_name, 64));
27868 - seq_printf(seq, "Service name : %s\n",
27869 - chtostr(result.service_name, 64));
27870 - seq_printf(seq, "Physical name : %s\n",
27871 - chtostr(result.physical_location, 64));
27872 - seq_printf(seq, "Instance number : %s\n",
27873 - chtostr(result.instance_number, 4));
27874 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27875 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27876 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27877 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27878
27879 return 0;
27880 }
27881 diff -urNp linux-2.6.39.4/drivers/message/i2o/iop.c linux-2.6.39.4/drivers/message/i2o/iop.c
27882 --- linux-2.6.39.4/drivers/message/i2o/iop.c 2011-05-19 00:06:34.000000000 -0400
27883 +++ linux-2.6.39.4/drivers/message/i2o/iop.c 2011-08-05 19:44:37.000000000 -0400
27884 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27885
27886 spin_lock_irqsave(&c->context_list_lock, flags);
27887
27888 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27889 - atomic_inc(&c->context_list_counter);
27890 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27891 + atomic_inc_unchecked(&c->context_list_counter);
27892
27893 - entry->context = atomic_read(&c->context_list_counter);
27894 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27895
27896 list_add(&entry->list, &c->context_list);
27897
27898 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27899
27900 #if BITS_PER_LONG == 64
27901 spin_lock_init(&c->context_list_lock);
27902 - atomic_set(&c->context_list_counter, 0);
27903 + atomic_set_unchecked(&c->context_list_counter, 0);
27904 INIT_LIST_HEAD(&c->context_list);
27905 #endif
27906
27907 diff -urNp linux-2.6.39.4/drivers/mfd/abx500-core.c linux-2.6.39.4/drivers/mfd/abx500-core.c
27908 --- linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-05-19 00:06:34.000000000 -0400
27909 +++ linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-08-05 20:34:06.000000000 -0400
27910 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27911
27912 struct abx500_device_entry {
27913 struct list_head list;
27914 - struct abx500_ops ops;
27915 + abx500_ops_no_const ops;
27916 struct device *dev;
27917 };
27918
27919 diff -urNp linux-2.6.39.4/drivers/mfd/janz-cmodio.c linux-2.6.39.4/drivers/mfd/janz-cmodio.c
27920 --- linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-05-19 00:06:34.000000000 -0400
27921 +++ linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-08-05 19:44:37.000000000 -0400
27922 @@ -13,6 +13,7 @@
27923
27924 #include <linux/kernel.h>
27925 #include <linux/module.h>
27926 +#include <linux/slab.h>
27927 #include <linux/init.h>
27928 #include <linux/pci.h>
27929 #include <linux/interrupt.h>
27930 diff -urNp linux-2.6.39.4/drivers/mfd/wm8350-i2c.c linux-2.6.39.4/drivers/mfd/wm8350-i2c.c
27931 --- linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-05-19 00:06:34.000000000 -0400
27932 +++ linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-08-05 19:44:37.000000000 -0400
27933 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27934 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27935 int ret;
27936
27937 + pax_track_stack();
27938 +
27939 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27940 return -EINVAL;
27941
27942 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c
27943 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-19 00:06:34.000000000 -0400
27944 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-05 19:44:37.000000000 -0400
27945 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27946 * the lid is closed. This leads to interrupts as soon as a little move
27947 * is done.
27948 */
27949 - atomic_inc(&lis3_dev.count);
27950 + atomic_inc_unchecked(&lis3_dev.count);
27951
27952 wake_up_interruptible(&lis3_dev.misc_wait);
27953 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27954 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27955 if (lis3_dev.pm_dev)
27956 pm_runtime_get_sync(lis3_dev.pm_dev);
27957
27958 - atomic_set(&lis3_dev.count, 0);
27959 + atomic_set_unchecked(&lis3_dev.count, 0);
27960 return 0;
27961 }
27962
27963 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
27964 add_wait_queue(&lis3_dev.misc_wait, &wait);
27965 while (true) {
27966 set_current_state(TASK_INTERRUPTIBLE);
27967 - data = atomic_xchg(&lis3_dev.count, 0);
27968 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
27969 if (data)
27970 break;
27971
27972 @@ -583,7 +583,7 @@ out:
27973 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
27974 {
27975 poll_wait(file, &lis3_dev.misc_wait, wait);
27976 - if (atomic_read(&lis3_dev.count))
27977 + if (atomic_read_unchecked(&lis3_dev.count))
27978 return POLLIN | POLLRDNORM;
27979 return 0;
27980 }
27981 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h
27982 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-19 00:06:34.000000000 -0400
27983 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-05 19:44:37.000000000 -0400
27984 @@ -265,7 +265,7 @@ struct lis3lv02d {
27985 struct input_polled_dev *idev; /* input device */
27986 struct platform_device *pdev; /* platform device */
27987 struct regulator_bulk_data regulators[2];
27988 - atomic_t count; /* interrupt count after last read */
27989 + atomic_unchecked_t count; /* interrupt count after last read */
27990 union axis_conversion ac; /* hw -> logical axis */
27991 int mapped_btns[3];
27992
27993 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c
27994 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-05-19 00:06:34.000000000 -0400
27995 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-05 19:44:37.000000000 -0400
27996 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
27997 unsigned long nsec;
27998
27999 nsec = CLKS2NSEC(clks);
28000 - atomic_long_inc(&mcs_op_statistics[op].count);
28001 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
28002 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28003 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28004 if (mcs_op_statistics[op].max < nsec)
28005 mcs_op_statistics[op].max = nsec;
28006 }
28007 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c
28008 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-05-19 00:06:34.000000000 -0400
28009 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-05 19:44:37.000000000 -0400
28010 @@ -32,9 +32,9 @@
28011
28012 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28013
28014 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28015 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28016 {
28017 - unsigned long val = atomic_long_read(v);
28018 + unsigned long val = atomic_long_read_unchecked(v);
28019
28020 seq_printf(s, "%16lu %s\n", val, id);
28021 }
28022 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28023
28024 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28025 for (op = 0; op < mcsop_last; op++) {
28026 - count = atomic_long_read(&mcs_op_statistics[op].count);
28027 - total = atomic_long_read(&mcs_op_statistics[op].total);
28028 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28029 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28030 max = mcs_op_statistics[op].max;
28031 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28032 count ? total / count : 0, max);
28033 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h
28034 --- linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-05-19 00:06:34.000000000 -0400
28035 +++ linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-08-05 19:44:37.000000000 -0400
28036 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28037 * GRU statistics.
28038 */
28039 struct gru_stats_s {
28040 - atomic_long_t vdata_alloc;
28041 - atomic_long_t vdata_free;
28042 - atomic_long_t gts_alloc;
28043 - atomic_long_t gts_free;
28044 - atomic_long_t gms_alloc;
28045 - atomic_long_t gms_free;
28046 - atomic_long_t gts_double_allocate;
28047 - atomic_long_t assign_context;
28048 - atomic_long_t assign_context_failed;
28049 - atomic_long_t free_context;
28050 - atomic_long_t load_user_context;
28051 - atomic_long_t load_kernel_context;
28052 - atomic_long_t lock_kernel_context;
28053 - atomic_long_t unlock_kernel_context;
28054 - atomic_long_t steal_user_context;
28055 - atomic_long_t steal_kernel_context;
28056 - atomic_long_t steal_context_failed;
28057 - atomic_long_t nopfn;
28058 - atomic_long_t asid_new;
28059 - atomic_long_t asid_next;
28060 - atomic_long_t asid_wrap;
28061 - atomic_long_t asid_reuse;
28062 - atomic_long_t intr;
28063 - atomic_long_t intr_cbr;
28064 - atomic_long_t intr_tfh;
28065 - atomic_long_t intr_spurious;
28066 - atomic_long_t intr_mm_lock_failed;
28067 - atomic_long_t call_os;
28068 - atomic_long_t call_os_wait_queue;
28069 - atomic_long_t user_flush_tlb;
28070 - atomic_long_t user_unload_context;
28071 - atomic_long_t user_exception;
28072 - atomic_long_t set_context_option;
28073 - atomic_long_t check_context_retarget_intr;
28074 - atomic_long_t check_context_unload;
28075 - atomic_long_t tlb_dropin;
28076 - atomic_long_t tlb_preload_page;
28077 - atomic_long_t tlb_dropin_fail_no_asid;
28078 - atomic_long_t tlb_dropin_fail_upm;
28079 - atomic_long_t tlb_dropin_fail_invalid;
28080 - atomic_long_t tlb_dropin_fail_range_active;
28081 - atomic_long_t tlb_dropin_fail_idle;
28082 - atomic_long_t tlb_dropin_fail_fmm;
28083 - atomic_long_t tlb_dropin_fail_no_exception;
28084 - atomic_long_t tfh_stale_on_fault;
28085 - atomic_long_t mmu_invalidate_range;
28086 - atomic_long_t mmu_invalidate_page;
28087 - atomic_long_t flush_tlb;
28088 - atomic_long_t flush_tlb_gru;
28089 - atomic_long_t flush_tlb_gru_tgh;
28090 - atomic_long_t flush_tlb_gru_zero_asid;
28091 -
28092 - atomic_long_t copy_gpa;
28093 - atomic_long_t read_gpa;
28094 -
28095 - atomic_long_t mesq_receive;
28096 - atomic_long_t mesq_receive_none;
28097 - atomic_long_t mesq_send;
28098 - atomic_long_t mesq_send_failed;
28099 - atomic_long_t mesq_noop;
28100 - atomic_long_t mesq_send_unexpected_error;
28101 - atomic_long_t mesq_send_lb_overflow;
28102 - atomic_long_t mesq_send_qlimit_reached;
28103 - atomic_long_t mesq_send_amo_nacked;
28104 - atomic_long_t mesq_send_put_nacked;
28105 - atomic_long_t mesq_page_overflow;
28106 - atomic_long_t mesq_qf_locked;
28107 - atomic_long_t mesq_qf_noop_not_full;
28108 - atomic_long_t mesq_qf_switch_head_failed;
28109 - atomic_long_t mesq_qf_unexpected_error;
28110 - atomic_long_t mesq_noop_unexpected_error;
28111 - atomic_long_t mesq_noop_lb_overflow;
28112 - atomic_long_t mesq_noop_qlimit_reached;
28113 - atomic_long_t mesq_noop_amo_nacked;
28114 - atomic_long_t mesq_noop_put_nacked;
28115 - atomic_long_t mesq_noop_page_overflow;
28116 + atomic_long_unchecked_t vdata_alloc;
28117 + atomic_long_unchecked_t vdata_free;
28118 + atomic_long_unchecked_t gts_alloc;
28119 + atomic_long_unchecked_t gts_free;
28120 + atomic_long_unchecked_t gms_alloc;
28121 + atomic_long_unchecked_t gms_free;
28122 + atomic_long_unchecked_t gts_double_allocate;
28123 + atomic_long_unchecked_t assign_context;
28124 + atomic_long_unchecked_t assign_context_failed;
28125 + atomic_long_unchecked_t free_context;
28126 + atomic_long_unchecked_t load_user_context;
28127 + atomic_long_unchecked_t load_kernel_context;
28128 + atomic_long_unchecked_t lock_kernel_context;
28129 + atomic_long_unchecked_t unlock_kernel_context;
28130 + atomic_long_unchecked_t steal_user_context;
28131 + atomic_long_unchecked_t steal_kernel_context;
28132 + atomic_long_unchecked_t steal_context_failed;
28133 + atomic_long_unchecked_t nopfn;
28134 + atomic_long_unchecked_t asid_new;
28135 + atomic_long_unchecked_t asid_next;
28136 + atomic_long_unchecked_t asid_wrap;
28137 + atomic_long_unchecked_t asid_reuse;
28138 + atomic_long_unchecked_t intr;
28139 + atomic_long_unchecked_t intr_cbr;
28140 + atomic_long_unchecked_t intr_tfh;
28141 + atomic_long_unchecked_t intr_spurious;
28142 + atomic_long_unchecked_t intr_mm_lock_failed;
28143 + atomic_long_unchecked_t call_os;
28144 + atomic_long_unchecked_t call_os_wait_queue;
28145 + atomic_long_unchecked_t user_flush_tlb;
28146 + atomic_long_unchecked_t user_unload_context;
28147 + atomic_long_unchecked_t user_exception;
28148 + atomic_long_unchecked_t set_context_option;
28149 + atomic_long_unchecked_t check_context_retarget_intr;
28150 + atomic_long_unchecked_t check_context_unload;
28151 + atomic_long_unchecked_t tlb_dropin;
28152 + atomic_long_unchecked_t tlb_preload_page;
28153 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28154 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28155 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28156 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28157 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28158 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28159 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28160 + atomic_long_unchecked_t tfh_stale_on_fault;
28161 + atomic_long_unchecked_t mmu_invalidate_range;
28162 + atomic_long_unchecked_t mmu_invalidate_page;
28163 + atomic_long_unchecked_t flush_tlb;
28164 + atomic_long_unchecked_t flush_tlb_gru;
28165 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28166 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28167 +
28168 + atomic_long_unchecked_t copy_gpa;
28169 + atomic_long_unchecked_t read_gpa;
28170 +
28171 + atomic_long_unchecked_t mesq_receive;
28172 + atomic_long_unchecked_t mesq_receive_none;
28173 + atomic_long_unchecked_t mesq_send;
28174 + atomic_long_unchecked_t mesq_send_failed;
28175 + atomic_long_unchecked_t mesq_noop;
28176 + atomic_long_unchecked_t mesq_send_unexpected_error;
28177 + atomic_long_unchecked_t mesq_send_lb_overflow;
28178 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28179 + atomic_long_unchecked_t mesq_send_amo_nacked;
28180 + atomic_long_unchecked_t mesq_send_put_nacked;
28181 + atomic_long_unchecked_t mesq_page_overflow;
28182 + atomic_long_unchecked_t mesq_qf_locked;
28183 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28184 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28185 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28186 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28187 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28188 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28189 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28190 + atomic_long_unchecked_t mesq_noop_put_nacked;
28191 + atomic_long_unchecked_t mesq_noop_page_overflow;
28192
28193 };
28194
28195 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28196 tghop_invalidate, mcsop_last};
28197
28198 struct mcs_op_statistic {
28199 - atomic_long_t count;
28200 - atomic_long_t total;
28201 + atomic_long_unchecked_t count;
28202 + atomic_long_unchecked_t total;
28203 unsigned long max;
28204 };
28205
28206 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28207
28208 #define STAT(id) do { \
28209 if (gru_options & OPT_STATS) \
28210 - atomic_long_inc(&gru_stats.id); \
28211 + atomic_long_inc_unchecked(&gru_stats.id); \
28212 } while (0)
28213
28214 #ifdef CONFIG_SGI_GRU_DEBUG
28215 diff -urNp linux-2.6.39.4/drivers/misc/sgi-xp/xp.h linux-2.6.39.4/drivers/misc/sgi-xp/xp.h
28216 --- linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-05-19 00:06:34.000000000 -0400
28217 +++ linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-08-05 20:34:06.000000000 -0400
28218 @@ -289,7 +289,7 @@ struct xpc_interface {
28219 xpc_notify_func, void *);
28220 void (*received) (short, int, void *);
28221 enum xp_retval (*partid_to_nasids) (short, void *);
28222 -};
28223 +} __no_const;
28224
28225 extern struct xpc_interface xpc_interface;
28226
28227 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c
28228 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-19 00:06:34.000000000 -0400
28229 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-05 19:44:37.000000000 -0400
28230 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28231 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28232 unsigned long timeo = jiffies + HZ;
28233
28234 + pax_track_stack();
28235 +
28236 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28237 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28238 goto sleep;
28239 @@ -1657,6 +1659,8 @@ static int __xipram do_write_buffer(stru
28240 unsigned long initial_adr;
28241 int initial_len = len;
28242
28243 + pax_track_stack();
28244 +
28245 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28246 adr += chip->start;
28247 initial_adr = adr;
28248 @@ -1875,6 +1879,8 @@ static int __xipram do_erase_oneblock(st
28249 int retries = 3;
28250 int ret;
28251
28252 + pax_track_stack();
28253 +
28254 adr += chip->start;
28255
28256 retry:
28257 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c
28258 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-19 00:06:34.000000000 -0400
28259 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-05 19:44:37.000000000 -0400
28260 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28261 unsigned long cmd_addr;
28262 struct cfi_private *cfi = map->fldrv_priv;
28263
28264 + pax_track_stack();
28265 +
28266 adr += chip->start;
28267
28268 /* Ensure cmd read/writes are aligned. */
28269 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
28270 DECLARE_WAITQUEUE(wait, current);
28271 int wbufsize, z;
28272
28273 + pax_track_stack();
28274 +
28275 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28276 if (adr & (map_bankwidth(map)-1))
28277 return -EINVAL;
28278 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
28279 DECLARE_WAITQUEUE(wait, current);
28280 int ret = 0;
28281
28282 + pax_track_stack();
28283 +
28284 adr += chip->start;
28285
28286 /* Let's determine this according to the interleave only once */
28287 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
28288 unsigned long timeo = jiffies + HZ;
28289 DECLARE_WAITQUEUE(wait, current);
28290
28291 + pax_track_stack();
28292 +
28293 adr += chip->start;
28294
28295 /* Let's determine this according to the interleave only once */
28296 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
28297 unsigned long timeo = jiffies + HZ;
28298 DECLARE_WAITQUEUE(wait, current);
28299
28300 + pax_track_stack();
28301 +
28302 adr += chip->start;
28303
28304 /* Let's determine this according to the interleave only once */
28305 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2000.c linux-2.6.39.4/drivers/mtd/devices/doc2000.c
28306 --- linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-05-19 00:06:34.000000000 -0400
28307 +++ linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-08-05 19:44:37.000000000 -0400
28308 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28309
28310 /* The ECC will not be calculated correctly if less than 512 is written */
28311 /* DBB-
28312 - if (len != 0x200 && eccbuf)
28313 + if (len != 0x200)
28314 printk(KERN_WARNING
28315 "ECC needs a full sector write (adr: %lx size %lx)\n",
28316 (long) to, (long) len);
28317 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2001.c linux-2.6.39.4/drivers/mtd/devices/doc2001.c
28318 --- linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-05-19 00:06:34.000000000 -0400
28319 +++ linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-08-05 19:44:37.000000000 -0400
28320 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28321 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28322
28323 /* Don't allow read past end of device */
28324 - if (from >= this->totlen)
28325 + if (from >= this->totlen || !len)
28326 return -EINVAL;
28327
28328 /* Don't allow a single read to cross a 512-byte block boundary */
28329 diff -urNp linux-2.6.39.4/drivers/mtd/ftl.c linux-2.6.39.4/drivers/mtd/ftl.c
28330 --- linux-2.6.39.4/drivers/mtd/ftl.c 2011-05-19 00:06:34.000000000 -0400
28331 +++ linux-2.6.39.4/drivers/mtd/ftl.c 2011-08-05 19:44:37.000000000 -0400
28332 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28333 loff_t offset;
28334 uint16_t srcunitswap = cpu_to_le16(srcunit);
28335
28336 + pax_track_stack();
28337 +
28338 eun = &part->EUNInfo[srcunit];
28339 xfer = &part->XferInfo[xferunit];
28340 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28341 diff -urNp linux-2.6.39.4/drivers/mtd/inftlcore.c linux-2.6.39.4/drivers/mtd/inftlcore.c
28342 --- linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-05-19 00:06:34.000000000 -0400
28343 +++ linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-08-05 19:44:37.000000000 -0400
28344 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28345 struct inftl_oob oob;
28346 size_t retlen;
28347
28348 + pax_track_stack();
28349 +
28350 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28351 "pending=%d)\n", inftl, thisVUC, pendingblock);
28352
28353 diff -urNp linux-2.6.39.4/drivers/mtd/inftlmount.c linux-2.6.39.4/drivers/mtd/inftlmount.c
28354 --- linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-05-19 00:06:34.000000000 -0400
28355 +++ linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-08-05 19:44:37.000000000 -0400
28356 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28357 struct INFTLPartition *ip;
28358 size_t retlen;
28359
28360 + pax_track_stack();
28361 +
28362 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28363
28364 /*
28365 diff -urNp linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c
28366 --- linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-05-19 00:06:34.000000000 -0400
28367 +++ linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-05 19:44:37.000000000 -0400
28368 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28369 {
28370 map_word pfow_val[4];
28371
28372 + pax_track_stack();
28373 +
28374 /* Check identification string */
28375 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28376 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28377 diff -urNp linux-2.6.39.4/drivers/mtd/mtdchar.c linux-2.6.39.4/drivers/mtd/mtdchar.c
28378 --- linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-05-19 00:06:34.000000000 -0400
28379 +++ linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-08-05 19:44:37.000000000 -0400
28380 @@ -560,6 +560,8 @@ static int mtd_ioctl(struct file *file,
28381 u_long size;
28382 struct mtd_info_user info;
28383
28384 + pax_track_stack();
28385 +
28386 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28387
28388 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28389 diff -urNp linux-2.6.39.4/drivers/mtd/nand/denali.c linux-2.6.39.4/drivers/mtd/nand/denali.c
28390 --- linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-05-19 00:06:34.000000000 -0400
28391 +++ linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-08-05 19:44:37.000000000 -0400
28392 @@ -25,6 +25,7 @@
28393 #include <linux/pci.h>
28394 #include <linux/mtd/mtd.h>
28395 #include <linux/module.h>
28396 +#include <linux/slab.h>
28397
28398 #include "denali.h"
28399
28400 diff -urNp linux-2.6.39.4/drivers/mtd/nftlcore.c linux-2.6.39.4/drivers/mtd/nftlcore.c
28401 --- linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-05-19 00:06:34.000000000 -0400
28402 +++ linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-08-05 19:44:37.000000000 -0400
28403 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28404 int inplace = 1;
28405 size_t retlen;
28406
28407 + pax_track_stack();
28408 +
28409 memset(BlockMap, 0xff, sizeof(BlockMap));
28410 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28411
28412 diff -urNp linux-2.6.39.4/drivers/mtd/nftlmount.c linux-2.6.39.4/drivers/mtd/nftlmount.c
28413 --- linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-05-19 00:06:34.000000000 -0400
28414 +++ linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-08-05 19:44:37.000000000 -0400
28415 @@ -24,6 +24,7 @@
28416 #include <asm/errno.h>
28417 #include <linux/delay.h>
28418 #include <linux/slab.h>
28419 +#include <linux/sched.h>
28420 #include <linux/mtd/mtd.h>
28421 #include <linux/mtd/nand.h>
28422 #include <linux/mtd/nftl.h>
28423 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28424 struct mtd_info *mtd = nftl->mbd.mtd;
28425 unsigned int i;
28426
28427 + pax_track_stack();
28428 +
28429 /* Assume logical EraseSize == physical erasesize for starting the scan.
28430 We'll sort it out later if we find a MediaHeader which says otherwise */
28431 /* Actually, we won't. The new DiskOnChip driver has already scanned
28432 diff -urNp linux-2.6.39.4/drivers/mtd/ubi/build.c linux-2.6.39.4/drivers/mtd/ubi/build.c
28433 --- linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-05-19 00:06:34.000000000 -0400
28434 +++ linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-08-05 19:44:37.000000000 -0400
28435 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28436 static int __init bytes_str_to_int(const char *str)
28437 {
28438 char *endp;
28439 - unsigned long result;
28440 + unsigned long result, scale = 1;
28441
28442 result = simple_strtoul(str, &endp, 0);
28443 if (str == endp || result >= INT_MAX) {
28444 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28445
28446 switch (*endp) {
28447 case 'G':
28448 - result *= 1024;
28449 + scale *= 1024;
28450 case 'M':
28451 - result *= 1024;
28452 + scale *= 1024;
28453 case 'K':
28454 - result *= 1024;
28455 + scale *= 1024;
28456 if (endp[1] == 'i' && endp[2] == 'B')
28457 endp += 2;
28458 case '\0':
28459 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28460 return -EINVAL;
28461 }
28462
28463 - return result;
28464 + if ((intoverflow_t)result*scale >= INT_MAX) {
28465 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28466 + str);
28467 + return -EINVAL;
28468 + }
28469 +
28470 + return result*scale;
28471 }
28472
28473 /**
28474 diff -urNp linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c
28475 --- linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-05-19 00:06:34.000000000 -0400
28476 +++ linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-05 20:34:06.000000000 -0400
28477 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28478 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28479 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28480
28481 -static struct bfa_ioc_hwif nw_hwif_ct;
28482 +static struct bfa_ioc_hwif nw_hwif_ct = {
28483 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28484 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28485 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28486 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28487 + .ioc_map_port = bfa_ioc_ct_map_port,
28488 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28489 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28490 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28491 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28492 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28493 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28494 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28495 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28496 +};
28497
28498 /**
28499 * Called from bfa_ioc_attach() to map asic specific calls.
28500 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28501 void
28502 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28503 {
28504 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28505 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28506 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28507 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28508 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28509 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28510 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28511 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28512 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28513 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28514 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28515 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28516 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28517 -
28518 ioc->ioc_hwif = &nw_hwif_ct;
28519 }
28520
28521 diff -urNp linux-2.6.39.4/drivers/net/bna/bnad.c linux-2.6.39.4/drivers/net/bna/bnad.c
28522 --- linux-2.6.39.4/drivers/net/bna/bnad.c 2011-05-19 00:06:34.000000000 -0400
28523 +++ linux-2.6.39.4/drivers/net/bna/bnad.c 2011-08-05 20:34:06.000000000 -0400
28524 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28525 struct bna_intr_info *intr_info =
28526 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28527 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28528 - struct bna_tx_event_cbfn tx_cbfn;
28529 + static struct bna_tx_event_cbfn tx_cbfn = {
28530 + /* Initialize the tx event handlers */
28531 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28532 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28533 + .tx_stall_cbfn = bnad_cb_tx_stall,
28534 + .tx_resume_cbfn = bnad_cb_tx_resume,
28535 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28536 + };
28537 struct bna_tx *tx;
28538 unsigned long flags;
28539
28540 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28541 tx_config->txq_depth = bnad->txq_depth;
28542 tx_config->tx_type = BNA_TX_T_REGULAR;
28543
28544 - /* Initialize the tx event handlers */
28545 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28546 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28547 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28548 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28549 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28550 -
28551 /* Get BNA's resource requirement for one tx object */
28552 spin_lock_irqsave(&bnad->bna_lock, flags);
28553 bna_tx_res_req(bnad->num_txq_per_tx,
28554 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28555 struct bna_intr_info *intr_info =
28556 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28557 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28558 - struct bna_rx_event_cbfn rx_cbfn;
28559 + static struct bna_rx_event_cbfn rx_cbfn = {
28560 + /* Initialize the Rx event handlers */
28561 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28562 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28563 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28564 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28565 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28566 + .rx_post_cbfn = bnad_cb_rx_post
28567 + };
28568 struct bna_rx *rx;
28569 unsigned long flags;
28570
28571 /* Initialize the Rx object configuration */
28572 bnad_init_rx_config(bnad, rx_config);
28573
28574 - /* Initialize the Rx event handlers */
28575 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28576 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28577 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28578 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28579 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28580 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28581 -
28582 /* Get BNA's resource requirement for one Rx object */
28583 spin_lock_irqsave(&bnad->bna_lock, flags);
28584 bna_rx_res_req(rx_config, res_info);
28585 diff -urNp linux-2.6.39.4/drivers/net/bnx2.c linux-2.6.39.4/drivers/net/bnx2.c
28586 --- linux-2.6.39.4/drivers/net/bnx2.c 2011-05-19 00:06:34.000000000 -0400
28587 +++ linux-2.6.39.4/drivers/net/bnx2.c 2011-08-05 19:44:37.000000000 -0400
28588 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28589 int rc = 0;
28590 u32 magic, csum;
28591
28592 + pax_track_stack();
28593 +
28594 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28595 goto test_nvram_done;
28596
28597 diff -urNp linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c
28598 --- linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-19 00:06:34.000000000 -0400
28599 +++ linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-05 19:44:37.000000000 -0400
28600 @@ -1788,6 +1788,8 @@ static int bnx2x_test_nvram(struct bnx2x
28601 int i, rc;
28602 u32 magic, crc;
28603
28604 + pax_track_stack();
28605 +
28606 if (BP_NOMCP(bp))
28607 return 0;
28608
28609 diff -urNp linux-2.6.39.4/drivers/net/cxgb3/l2t.h linux-2.6.39.4/drivers/net/cxgb3/l2t.h
28610 --- linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-05-19 00:06:34.000000000 -0400
28611 +++ linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-08-05 20:34:06.000000000 -0400
28612 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28613 */
28614 struct l2t_skb_cb {
28615 arp_failure_handler_func arp_failure_handler;
28616 -};
28617 +} __no_const;
28618
28619 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28620
28621 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c
28622 --- linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-05-19 00:06:34.000000000 -0400
28623 +++ linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-05 19:44:37.000000000 -0400
28624 @@ -3428,6 +3428,8 @@ static int __devinit enable_msix(struct
28625 unsigned int nchan = adap->params.nports;
28626 struct msix_entry entries[MAX_INGQ + 1];
28627
28628 + pax_track_stack();
28629 +
28630 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28631 entries[i].entry = i;
28632
28633 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c
28634 --- linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-05-19 00:06:34.000000000 -0400
28635 +++ linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-08-05 19:44:37.000000000 -0400
28636 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28637 u8 vpd[VPD_LEN], csum;
28638 unsigned int vpdr_len, kw_offset, id_len;
28639
28640 + pax_track_stack();
28641 +
28642 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28643 if (ret < 0)
28644 return ret;
28645 diff -urNp linux-2.6.39.4/drivers/net/e1000e/82571.c linux-2.6.39.4/drivers/net/e1000e/82571.c
28646 --- linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-05-19 00:06:34.000000000 -0400
28647 +++ linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-08-05 20:34:06.000000000 -0400
28648 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28649 {
28650 struct e1000_hw *hw = &adapter->hw;
28651 struct e1000_mac_info *mac = &hw->mac;
28652 - struct e1000_mac_operations *func = &mac->ops;
28653 + e1000_mac_operations_no_const *func = &mac->ops;
28654 u32 swsm = 0;
28655 u32 swsm2 = 0;
28656 bool force_clear_smbi = false;
28657 diff -urNp linux-2.6.39.4/drivers/net/e1000e/es2lan.c linux-2.6.39.4/drivers/net/e1000e/es2lan.c
28658 --- linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-05-19 00:06:34.000000000 -0400
28659 +++ linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-08-05 20:34:06.000000000 -0400
28660 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28661 {
28662 struct e1000_hw *hw = &adapter->hw;
28663 struct e1000_mac_info *mac = &hw->mac;
28664 - struct e1000_mac_operations *func = &mac->ops;
28665 + e1000_mac_operations_no_const *func = &mac->ops;
28666
28667 /* Set media type */
28668 switch (adapter->pdev->device) {
28669 diff -urNp linux-2.6.39.4/drivers/net/e1000e/hw.h linux-2.6.39.4/drivers/net/e1000e/hw.h
28670 --- linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-05-19 00:06:34.000000000 -0400
28671 +++ linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-08-05 20:34:06.000000000 -0400
28672 @@ -775,6 +775,7 @@ struct e1000_mac_operations {
28673 void (*write_vfta)(struct e1000_hw *, u32, u32);
28674 s32 (*read_mac_addr)(struct e1000_hw *);
28675 };
28676 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28677
28678 /* Function pointers for the PHY. */
28679 struct e1000_phy_operations {
28680 @@ -798,6 +799,7 @@ struct e1000_phy_operations {
28681 void (*power_up)(struct e1000_hw *);
28682 void (*power_down)(struct e1000_hw *);
28683 };
28684 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28685
28686 /* Function pointers for the NVM. */
28687 struct e1000_nvm_operations {
28688 @@ -809,9 +811,10 @@ struct e1000_nvm_operations {
28689 s32 (*validate)(struct e1000_hw *);
28690 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28691 };
28692 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28693
28694 struct e1000_mac_info {
28695 - struct e1000_mac_operations ops;
28696 + e1000_mac_operations_no_const ops;
28697 u8 addr[ETH_ALEN];
28698 u8 perm_addr[ETH_ALEN];
28699
28700 @@ -852,7 +855,7 @@ struct e1000_mac_info {
28701 };
28702
28703 struct e1000_phy_info {
28704 - struct e1000_phy_operations ops;
28705 + e1000_phy_operations_no_const ops;
28706
28707 enum e1000_phy_type type;
28708
28709 @@ -886,7 +889,7 @@ struct e1000_phy_info {
28710 };
28711
28712 struct e1000_nvm_info {
28713 - struct e1000_nvm_operations ops;
28714 + e1000_nvm_operations_no_const ops;
28715
28716 enum e1000_nvm_type type;
28717 enum e1000_nvm_override override;
28718 diff -urNp linux-2.6.39.4/drivers/net/hamradio/6pack.c linux-2.6.39.4/drivers/net/hamradio/6pack.c
28719 --- linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-07-09 09:18:51.000000000 -0400
28720 +++ linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-08-05 19:44:37.000000000 -0400
28721 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28722 unsigned char buf[512];
28723 int count1;
28724
28725 + pax_track_stack();
28726 +
28727 if (!count)
28728 return;
28729
28730 diff -urNp linux-2.6.39.4/drivers/net/igb/e1000_hw.h linux-2.6.39.4/drivers/net/igb/e1000_hw.h
28731 --- linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-05-19 00:06:34.000000000 -0400
28732 +++ linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-08-05 20:34:06.000000000 -0400
28733 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28734 s32 (*read_mac_addr)(struct e1000_hw *);
28735 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28736 };
28737 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28738
28739 struct e1000_phy_operations {
28740 s32 (*acquire)(struct e1000_hw *);
28741 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28742 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28743 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28744 };
28745 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28746
28747 struct e1000_nvm_operations {
28748 s32 (*acquire)(struct e1000_hw *);
28749 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28750 s32 (*update)(struct e1000_hw *);
28751 s32 (*validate)(struct e1000_hw *);
28752 };
28753 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28754
28755 struct e1000_info {
28756 s32 (*get_invariants)(struct e1000_hw *);
28757 @@ -350,7 +353,7 @@ struct e1000_info {
28758 extern const struct e1000_info e1000_82575_info;
28759
28760 struct e1000_mac_info {
28761 - struct e1000_mac_operations ops;
28762 + e1000_mac_operations_no_const ops;
28763
28764 u8 addr[6];
28765 u8 perm_addr[6];
28766 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28767 };
28768
28769 struct e1000_phy_info {
28770 - struct e1000_phy_operations ops;
28771 + e1000_phy_operations_no_const ops;
28772
28773 enum e1000_phy_type type;
28774
28775 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28776 };
28777
28778 struct e1000_nvm_info {
28779 - struct e1000_nvm_operations ops;
28780 + e1000_nvm_operations_no_const ops;
28781 enum e1000_nvm_type type;
28782 enum e1000_nvm_override override;
28783
28784 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28785 s32 (*check_for_ack)(struct e1000_hw *, u16);
28786 s32 (*check_for_rst)(struct e1000_hw *, u16);
28787 };
28788 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28789
28790 struct e1000_mbx_stats {
28791 u32 msgs_tx;
28792 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28793 };
28794
28795 struct e1000_mbx_info {
28796 - struct e1000_mbx_operations ops;
28797 + e1000_mbx_operations_no_const ops;
28798 struct e1000_mbx_stats stats;
28799 u32 timeout;
28800 u32 usec_delay;
28801 diff -urNp linux-2.6.39.4/drivers/net/igbvf/vf.h linux-2.6.39.4/drivers/net/igbvf/vf.h
28802 --- linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-05-19 00:06:34.000000000 -0400
28803 +++ linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-08-05 20:34:06.000000000 -0400
28804 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28805 s32 (*read_mac_addr)(struct e1000_hw *);
28806 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28807 };
28808 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28809
28810 struct e1000_mac_info {
28811 - struct e1000_mac_operations ops;
28812 + e1000_mac_operations_no_const ops;
28813 u8 addr[6];
28814 u8 perm_addr[6];
28815
28816 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28817 s32 (*check_for_ack)(struct e1000_hw *);
28818 s32 (*check_for_rst)(struct e1000_hw *);
28819 };
28820 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28821
28822 struct e1000_mbx_stats {
28823 u32 msgs_tx;
28824 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28825 };
28826
28827 struct e1000_mbx_info {
28828 - struct e1000_mbx_operations ops;
28829 + e1000_mbx_operations_no_const ops;
28830 struct e1000_mbx_stats stats;
28831 u32 timeout;
28832 u32 usec_delay;
28833 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c
28834 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-05-19 00:06:34.000000000 -0400
28835 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-08-05 19:44:37.000000000 -0400
28836 @@ -1069,6 +1069,8 @@ ixgb_set_multi(struct net_device *netdev
28837 u32 rctl;
28838 int i;
28839
28840 + pax_track_stack();
28841 +
28842 /* Check for Promiscuous and All Multicast modes */
28843
28844 rctl = IXGB_READ_REG(hw, RCTL);
28845 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c
28846 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-05-19 00:06:34.000000000 -0400
28847 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-08-05 19:44:37.000000000 -0400
28848 @@ -261,6 +261,9 @@ void __devinit
28849 ixgb_check_options(struct ixgb_adapter *adapter)
28850 {
28851 int bd = adapter->bd_number;
28852 +
28853 + pax_track_stack();
28854 +
28855 if (bd >= IXGB_MAX_NIC) {
28856 pr_notice("Warning: no configuration for board #%i\n", bd);
28857 pr_notice("Using defaults for all values\n");
28858 diff -urNp linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h
28859 --- linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-05-19 00:06:34.000000000 -0400
28860 +++ linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-05 20:34:06.000000000 -0400
28861 @@ -2496,6 +2496,7 @@ struct ixgbe_eeprom_operations {
28862 s32 (*update_checksum)(struct ixgbe_hw *);
28863 u16 (*calc_checksum)(struct ixgbe_hw *);
28864 };
28865 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28866
28867 struct ixgbe_mac_operations {
28868 s32 (*init_hw)(struct ixgbe_hw *);
28869 @@ -2551,6 +2552,7 @@ struct ixgbe_mac_operations {
28870 /* Flow Control */
28871 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28872 };
28873 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28874
28875 struct ixgbe_phy_operations {
28876 s32 (*identify)(struct ixgbe_hw *);
28877 @@ -2570,9 +2572,10 @@ struct ixgbe_phy_operations {
28878 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28879 s32 (*check_overtemp)(struct ixgbe_hw *);
28880 };
28881 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28882
28883 struct ixgbe_eeprom_info {
28884 - struct ixgbe_eeprom_operations ops;
28885 + ixgbe_eeprom_operations_no_const ops;
28886 enum ixgbe_eeprom_type type;
28887 u32 semaphore_delay;
28888 u16 word_size;
28889 @@ -2581,7 +2584,7 @@ struct ixgbe_eeprom_info {
28890
28891 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28892 struct ixgbe_mac_info {
28893 - struct ixgbe_mac_operations ops;
28894 + ixgbe_mac_operations_no_const ops;
28895 enum ixgbe_mac_type type;
28896 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28897 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28898 @@ -2608,7 +2611,7 @@ struct ixgbe_mac_info {
28899 };
28900
28901 struct ixgbe_phy_info {
28902 - struct ixgbe_phy_operations ops;
28903 + ixgbe_phy_operations_no_const ops;
28904 struct mdio_if_info mdio;
28905 enum ixgbe_phy_type type;
28906 u32 id;
28907 @@ -2636,6 +2639,7 @@ struct ixgbe_mbx_operations {
28908 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28909 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28910 };
28911 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28912
28913 struct ixgbe_mbx_stats {
28914 u32 msgs_tx;
28915 @@ -2647,7 +2651,7 @@ struct ixgbe_mbx_stats {
28916 };
28917
28918 struct ixgbe_mbx_info {
28919 - struct ixgbe_mbx_operations ops;
28920 + ixgbe_mbx_operations_no_const ops;
28921 struct ixgbe_mbx_stats stats;
28922 u32 timeout;
28923 u32 usec_delay;
28924 diff -urNp linux-2.6.39.4/drivers/net/ixgbevf/vf.h linux-2.6.39.4/drivers/net/ixgbevf/vf.h
28925 --- linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-05-19 00:06:34.000000000 -0400
28926 +++ linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-08-05 20:34:06.000000000 -0400
28927 @@ -69,6 +69,7 @@ struct ixgbe_mac_operations {
28928 s32 (*clear_vfta)(struct ixgbe_hw *);
28929 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28930 };
28931 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28932
28933 enum ixgbe_mac_type {
28934 ixgbe_mac_unknown = 0,
28935 @@ -78,7 +79,7 @@ enum ixgbe_mac_type {
28936 };
28937
28938 struct ixgbe_mac_info {
28939 - struct ixgbe_mac_operations ops;
28940 + ixgbe_mac_operations_no_const ops;
28941 u8 addr[6];
28942 u8 perm_addr[6];
28943
28944 @@ -102,6 +103,7 @@ struct ixgbe_mbx_operations {
28945 s32 (*check_for_ack)(struct ixgbe_hw *);
28946 s32 (*check_for_rst)(struct ixgbe_hw *);
28947 };
28948 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28949
28950 struct ixgbe_mbx_stats {
28951 u32 msgs_tx;
28952 @@ -113,7 +115,7 @@ struct ixgbe_mbx_stats {
28953 };
28954
28955 struct ixgbe_mbx_info {
28956 - struct ixgbe_mbx_operations ops;
28957 + ixgbe_mbx_operations_no_const ops;
28958 struct ixgbe_mbx_stats stats;
28959 u32 timeout;
28960 u32 udelay;
28961 diff -urNp linux-2.6.39.4/drivers/net/ksz884x.c linux-2.6.39.4/drivers/net/ksz884x.c
28962 --- linux-2.6.39.4/drivers/net/ksz884x.c 2011-05-19 00:06:34.000000000 -0400
28963 +++ linux-2.6.39.4/drivers/net/ksz884x.c 2011-08-05 20:34:06.000000000 -0400
28964 @@ -6536,6 +6536,8 @@ static void netdev_get_ethtool_stats(str
28965 int rc;
28966 u64 counter[TOTAL_PORT_COUNTER_NUM];
28967
28968 + pax_track_stack();
28969 +
28970 mutex_lock(&hw_priv->lock);
28971 n = SWITCH_PORT_NUM;
28972 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
28973 diff -urNp linux-2.6.39.4/drivers/net/mlx4/main.c linux-2.6.39.4/drivers/net/mlx4/main.c
28974 --- linux-2.6.39.4/drivers/net/mlx4/main.c 2011-05-19 00:06:34.000000000 -0400
28975 +++ linux-2.6.39.4/drivers/net/mlx4/main.c 2011-08-05 19:44:37.000000000 -0400
28976 @@ -40,6 +40,7 @@
28977 #include <linux/dma-mapping.h>
28978 #include <linux/slab.h>
28979 #include <linux/io-mapping.h>
28980 +#include <linux/sched.h>
28981
28982 #include <linux/mlx4/device.h>
28983 #include <linux/mlx4/doorbell.h>
28984 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
28985 u64 icm_size;
28986 int err;
28987
28988 + pax_track_stack();
28989 +
28990 err = mlx4_QUERY_FW(dev);
28991 if (err) {
28992 if (err == -EACCES)
28993 diff -urNp linux-2.6.39.4/drivers/net/niu.c linux-2.6.39.4/drivers/net/niu.c
28994 --- linux-2.6.39.4/drivers/net/niu.c 2011-05-19 00:06:34.000000000 -0400
28995 +++ linux-2.6.39.4/drivers/net/niu.c 2011-08-05 19:44:37.000000000 -0400
28996 @@ -9067,6 +9067,8 @@ static void __devinit niu_try_msix(struc
28997 int i, num_irqs, err;
28998 u8 first_ldg;
28999
29000 + pax_track_stack();
29001 +
29002 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29003 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29004 ldg_num_map[i] = first_ldg + i;
29005 diff -urNp linux-2.6.39.4/drivers/net/pcnet32.c linux-2.6.39.4/drivers/net/pcnet32.c
29006 --- linux-2.6.39.4/drivers/net/pcnet32.c 2011-05-19 00:06:34.000000000 -0400
29007 +++ linux-2.6.39.4/drivers/net/pcnet32.c 2011-08-05 20:34:06.000000000 -0400
29008 @@ -82,7 +82,7 @@ static int cards_found;
29009 /*
29010 * VLB I/O addresses
29011 */
29012 -static unsigned int pcnet32_portlist[] __initdata =
29013 +static unsigned int pcnet32_portlist[] __devinitdata =
29014 { 0x300, 0x320, 0x340, 0x360, 0 };
29015
29016 static int pcnet32_debug;
29017 @@ -270,7 +270,7 @@ struct pcnet32_private {
29018 struct sk_buff **rx_skbuff;
29019 dma_addr_t *tx_dma_addr;
29020 dma_addr_t *rx_dma_addr;
29021 - struct pcnet32_access a;
29022 + struct pcnet32_access *a;
29023 spinlock_t lock; /* Guard lock */
29024 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29025 unsigned int rx_ring_size; /* current rx ring size */
29026 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29027 u16 val;
29028
29029 netif_wake_queue(dev);
29030 - val = lp->a.read_csr(ioaddr, CSR3);
29031 + val = lp->a->read_csr(ioaddr, CSR3);
29032 val &= 0x00ff;
29033 - lp->a.write_csr(ioaddr, CSR3, val);
29034 + lp->a->write_csr(ioaddr, CSR3, val);
29035 napi_enable(&lp->napi);
29036 }
29037
29038 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29039 r = mii_link_ok(&lp->mii_if);
29040 } else if (lp->chip_version >= PCNET32_79C970A) {
29041 ulong ioaddr = dev->base_addr; /* card base I/O address */
29042 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29043 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29044 } else { /* can not detect link on really old chips */
29045 r = 1;
29046 }
29047 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29048 pcnet32_netif_stop(dev);
29049
29050 spin_lock_irqsave(&lp->lock, flags);
29051 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29052 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29053
29054 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29055
29056 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29057 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29058 {
29059 struct pcnet32_private *lp = netdev_priv(dev);
29060 - struct pcnet32_access *a = &lp->a; /* access to registers */
29061 + struct pcnet32_access *a = lp->a; /* access to registers */
29062 ulong ioaddr = dev->base_addr; /* card base I/O address */
29063 struct sk_buff *skb; /* sk buff */
29064 int x, i; /* counters */
29065 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29066 pcnet32_netif_stop(dev);
29067
29068 spin_lock_irqsave(&lp->lock, flags);
29069 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29070 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29071
29072 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29073
29074 /* Reset the PCNET32 */
29075 - lp->a.reset(ioaddr);
29076 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29077 + lp->a->reset(ioaddr);
29078 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29079
29080 /* switch pcnet32 to 32bit mode */
29081 - lp->a.write_bcr(ioaddr, 20, 2);
29082 + lp->a->write_bcr(ioaddr, 20, 2);
29083
29084 /* purge & init rings but don't actually restart */
29085 pcnet32_restart(dev, 0x0000);
29086
29087 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29088 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29089
29090 /* Initialize Transmit buffers. */
29091 size = data_len + 15;
29092 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29093
29094 /* set int loopback in CSR15 */
29095 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29096 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29097 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29098
29099 teststatus = cpu_to_le16(0x8000);
29100 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29101 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29102
29103 /* Check status of descriptors */
29104 for (x = 0; x < numbuffs; x++) {
29105 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29106 }
29107 }
29108
29109 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29110 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29111 wmb();
29112 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29113 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29114 @@ -1015,7 +1015,7 @@ clean_up:
29115 pcnet32_restart(dev, CSR0_NORMAL);
29116 } else {
29117 pcnet32_purge_rx_ring(dev);
29118 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29119 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29120 }
29121 spin_unlock_irqrestore(&lp->lock, flags);
29122
29123 @@ -1025,7 +1025,7 @@ clean_up:
29124 static void pcnet32_led_blink_callback(struct net_device *dev)
29125 {
29126 struct pcnet32_private *lp = netdev_priv(dev);
29127 - struct pcnet32_access *a = &lp->a;
29128 + struct pcnet32_access *a = lp->a;
29129 ulong ioaddr = dev->base_addr;
29130 unsigned long flags;
29131 int i;
29132 @@ -1041,7 +1041,7 @@ static void pcnet32_led_blink_callback(s
29133 static int pcnet32_phys_id(struct net_device *dev, u32 data)
29134 {
29135 struct pcnet32_private *lp = netdev_priv(dev);
29136 - struct pcnet32_access *a = &lp->a;
29137 + struct pcnet32_access *a = lp->a;
29138 ulong ioaddr = dev->base_addr;
29139 unsigned long flags;
29140 int i, regs[4];
29141 @@ -1085,7 +1085,7 @@ static int pcnet32_suspend(struct net_de
29142 {
29143 int csr5;
29144 struct pcnet32_private *lp = netdev_priv(dev);
29145 - struct pcnet32_access *a = &lp->a;
29146 + struct pcnet32_access *a = lp->a;
29147 ulong ioaddr = dev->base_addr;
29148 int ticks;
29149
29150 @@ -1342,8 +1342,8 @@ static int pcnet32_poll(struct napi_stru
29151 spin_lock_irqsave(&lp->lock, flags);
29152 if (pcnet32_tx(dev)) {
29153 /* reset the chip to clear the error condition, then restart */
29154 - lp->a.reset(ioaddr);
29155 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29156 + lp->a->reset(ioaddr);
29157 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29158 pcnet32_restart(dev, CSR0_START);
29159 netif_wake_queue(dev);
29160 }
29161 @@ -1355,12 +1355,12 @@ static int pcnet32_poll(struct napi_stru
29162 __napi_complete(napi);
29163
29164 /* clear interrupt masks */
29165 - val = lp->a.read_csr(ioaddr, CSR3);
29166 + val = lp->a->read_csr(ioaddr, CSR3);
29167 val &= 0x00ff;
29168 - lp->a.write_csr(ioaddr, CSR3, val);
29169 + lp->a->write_csr(ioaddr, CSR3, val);
29170
29171 /* Set interrupt enable. */
29172 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29173 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29174
29175 spin_unlock_irqrestore(&lp->lock, flags);
29176 }
29177 @@ -1383,7 +1383,7 @@ static void pcnet32_get_regs(struct net_
29178 int i, csr0;
29179 u16 *buff = ptr;
29180 struct pcnet32_private *lp = netdev_priv(dev);
29181 - struct pcnet32_access *a = &lp->a;
29182 + struct pcnet32_access *a = lp->a;
29183 ulong ioaddr = dev->base_addr;
29184 unsigned long flags;
29185
29186 @@ -1419,9 +1419,9 @@ static void pcnet32_get_regs(struct net_
29187 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29188 if (lp->phymask & (1 << j)) {
29189 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29190 - lp->a.write_bcr(ioaddr, 33,
29191 + lp->a->write_bcr(ioaddr, 33,
29192 (j << 5) | i);
29193 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29194 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29195 }
29196 }
29197 }
29198 @@ -1803,7 +1803,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29199 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29200 lp->options |= PCNET32_PORT_FD;
29201
29202 - lp->a = *a;
29203 + lp->a = a;
29204
29205 /* prior to register_netdev, dev->name is not yet correct */
29206 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29207 @@ -1862,7 +1862,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29208 if (lp->mii) {
29209 /* lp->phycount and lp->phymask are set to 0 by memset above */
29210
29211 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29212 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29213 /* scan for PHYs */
29214 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29215 unsigned short id1, id2;
29216 @@ -1882,7 +1882,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29217 pr_info("Found PHY %04x:%04x at address %d\n",
29218 id1, id2, i);
29219 }
29220 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29221 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29222 if (lp->phycount > 1)
29223 lp->options |= PCNET32_PORT_MII;
29224 }
29225 @@ -2038,10 +2038,10 @@ static int pcnet32_open(struct net_devic
29226 }
29227
29228 /* Reset the PCNET32 */
29229 - lp->a.reset(ioaddr);
29230 + lp->a->reset(ioaddr);
29231
29232 /* switch pcnet32 to 32bit mode */
29233 - lp->a.write_bcr(ioaddr, 20, 2);
29234 + lp->a->write_bcr(ioaddr, 20, 2);
29235
29236 netif_printk(lp, ifup, KERN_DEBUG, dev,
29237 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29238 @@ -2050,14 +2050,14 @@ static int pcnet32_open(struct net_devic
29239 (u32) (lp->init_dma_addr));
29240
29241 /* set/reset autoselect bit */
29242 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29243 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29244 if (lp->options & PCNET32_PORT_ASEL)
29245 val |= 2;
29246 - lp->a.write_bcr(ioaddr, 2, val);
29247 + lp->a->write_bcr(ioaddr, 2, val);
29248
29249 /* handle full duplex setting */
29250 if (lp->mii_if.full_duplex) {
29251 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29252 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29253 if (lp->options & PCNET32_PORT_FD) {
29254 val |= 1;
29255 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29256 @@ -2067,14 +2067,14 @@ static int pcnet32_open(struct net_devic
29257 if (lp->chip_version == 0x2627)
29258 val |= 3;
29259 }
29260 - lp->a.write_bcr(ioaddr, 9, val);
29261 + lp->a->write_bcr(ioaddr, 9, val);
29262 }
29263
29264 /* set/reset GPSI bit in test register */
29265 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29266 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29267 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29268 val |= 0x10;
29269 - lp->a.write_csr(ioaddr, 124, val);
29270 + lp->a->write_csr(ioaddr, 124, val);
29271
29272 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29273 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29274 @@ -2093,24 +2093,24 @@ static int pcnet32_open(struct net_devic
29275 * duplex, and/or enable auto negotiation, and clear DANAS
29276 */
29277 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29278 - lp->a.write_bcr(ioaddr, 32,
29279 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29280 + lp->a->write_bcr(ioaddr, 32,
29281 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29282 /* disable Auto Negotiation, set 10Mpbs, HD */
29283 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29284 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29285 if (lp->options & PCNET32_PORT_FD)
29286 val |= 0x10;
29287 if (lp->options & PCNET32_PORT_100)
29288 val |= 0x08;
29289 - lp->a.write_bcr(ioaddr, 32, val);
29290 + lp->a->write_bcr(ioaddr, 32, val);
29291 } else {
29292 if (lp->options & PCNET32_PORT_ASEL) {
29293 - lp->a.write_bcr(ioaddr, 32,
29294 - lp->a.read_bcr(ioaddr,
29295 + lp->a->write_bcr(ioaddr, 32,
29296 + lp->a->read_bcr(ioaddr,
29297 32) | 0x0080);
29298 /* enable auto negotiate, setup, disable fd */
29299 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29300 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29301 val |= 0x20;
29302 - lp->a.write_bcr(ioaddr, 32, val);
29303 + lp->a->write_bcr(ioaddr, 32, val);
29304 }
29305 }
29306 } else {
29307 @@ -2123,10 +2123,10 @@ static int pcnet32_open(struct net_devic
29308 * There is really no good other way to handle multiple PHYs
29309 * other than turning off all automatics
29310 */
29311 - val = lp->a.read_bcr(ioaddr, 2);
29312 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29313 - val = lp->a.read_bcr(ioaddr, 32);
29314 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29315 + val = lp->a->read_bcr(ioaddr, 2);
29316 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29317 + val = lp->a->read_bcr(ioaddr, 32);
29318 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29319
29320 if (!(lp->options & PCNET32_PORT_ASEL)) {
29321 /* setup ecmd */
29322 @@ -2136,7 +2136,7 @@ static int pcnet32_open(struct net_devic
29323 ecmd.speed =
29324 lp->
29325 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
29326 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29327 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29328
29329 if (lp->options & PCNET32_PORT_FD) {
29330 ecmd.duplex = DUPLEX_FULL;
29331 @@ -2145,7 +2145,7 @@ static int pcnet32_open(struct net_devic
29332 ecmd.duplex = DUPLEX_HALF;
29333 bcr9 |= ~(1 << 0);
29334 }
29335 - lp->a.write_bcr(ioaddr, 9, bcr9);
29336 + lp->a->write_bcr(ioaddr, 9, bcr9);
29337 }
29338
29339 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29340 @@ -2176,9 +2176,9 @@ static int pcnet32_open(struct net_devic
29341
29342 #ifdef DO_DXSUFLO
29343 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29344 - val = lp->a.read_csr(ioaddr, CSR3);
29345 + val = lp->a->read_csr(ioaddr, CSR3);
29346 val |= 0x40;
29347 - lp->a.write_csr(ioaddr, CSR3, val);
29348 + lp->a->write_csr(ioaddr, CSR3, val);
29349 }
29350 #endif
29351
29352 @@ -2194,11 +2194,11 @@ static int pcnet32_open(struct net_devic
29353 napi_enable(&lp->napi);
29354
29355 /* Re-initialize the PCNET32, and start it when done. */
29356 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29357 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29358 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29359 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29360
29361 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29362 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29363 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29364 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29365
29366 netif_start_queue(dev);
29367
29368 @@ -2210,19 +2210,19 @@ static int pcnet32_open(struct net_devic
29369
29370 i = 0;
29371 while (i++ < 100)
29372 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29373 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29374 break;
29375 /*
29376 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29377 * reports that doing so triggers a bug in the '974.
29378 */
29379 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29380 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29381
29382 netif_printk(lp, ifup, KERN_DEBUG, dev,
29383 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29384 i,
29385 (u32) (lp->init_dma_addr),
29386 - lp->a.read_csr(ioaddr, CSR0));
29387 + lp->a->read_csr(ioaddr, CSR0));
29388
29389 spin_unlock_irqrestore(&lp->lock, flags);
29390
29391 @@ -2236,7 +2236,7 @@ err_free_ring:
29392 * Switch back to 16bit mode to avoid problems with dumb
29393 * DOS packet driver after a warm reboot
29394 */
29395 - lp->a.write_bcr(ioaddr, 20, 4);
29396 + lp->a->write_bcr(ioaddr, 20, 4);
29397
29398 err_free_irq:
29399 spin_unlock_irqrestore(&lp->lock, flags);
29400 @@ -2341,7 +2341,7 @@ static void pcnet32_restart(struct net_d
29401
29402 /* wait for stop */
29403 for (i = 0; i < 100; i++)
29404 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29405 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29406 break;
29407
29408 if (i >= 100)
29409 @@ -2353,13 +2353,13 @@ static void pcnet32_restart(struct net_d
29410 return;
29411
29412 /* ReInit Ring */
29413 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29414 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29415 i = 0;
29416 while (i++ < 1000)
29417 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29418 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29419 break;
29420
29421 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29422 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29423 }
29424
29425 static void pcnet32_tx_timeout(struct net_device *dev)
29426 @@ -2371,8 +2371,8 @@ static void pcnet32_tx_timeout(struct ne
29427 /* Transmitter timeout, serious problems. */
29428 if (pcnet32_debug & NETIF_MSG_DRV)
29429 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29430 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29431 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29432 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29433 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29434 dev->stats.tx_errors++;
29435 if (netif_msg_tx_err(lp)) {
29436 int i;
29437 @@ -2415,7 +2415,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29438
29439 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29440 "%s() called, csr0 %4.4x\n",
29441 - __func__, lp->a.read_csr(ioaddr, CSR0));
29442 + __func__, lp->a->read_csr(ioaddr, CSR0));
29443
29444 /* Default status -- will not enable Successful-TxDone
29445 * interrupt when that option is available to us.
29446 @@ -2445,7 +2445,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29447 dev->stats.tx_bytes += skb->len;
29448
29449 /* Trigger an immediate send poll. */
29450 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29451 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29452
29453 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29454 lp->tx_full = 1;
29455 @@ -2470,16 +2470,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29456
29457 spin_lock(&lp->lock);
29458
29459 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29460 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29461 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29462 if (csr0 == 0xffff)
29463 break; /* PCMCIA remove happened */
29464 /* Acknowledge all of the current interrupt sources ASAP. */
29465 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29466 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29467
29468 netif_printk(lp, intr, KERN_DEBUG, dev,
29469 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29470 - csr0, lp->a.read_csr(ioaddr, CSR0));
29471 + csr0, lp->a->read_csr(ioaddr, CSR0));
29472
29473 /* Log misc errors. */
29474 if (csr0 & 0x4000)
29475 @@ -2506,19 +2506,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29476 if (napi_schedule_prep(&lp->napi)) {
29477 u16 val;
29478 /* set interrupt masks */
29479 - val = lp->a.read_csr(ioaddr, CSR3);
29480 + val = lp->a->read_csr(ioaddr, CSR3);
29481 val |= 0x5f00;
29482 - lp->a.write_csr(ioaddr, CSR3, val);
29483 + lp->a->write_csr(ioaddr, CSR3, val);
29484
29485 __napi_schedule(&lp->napi);
29486 break;
29487 }
29488 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29489 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29490 }
29491
29492 netif_printk(lp, intr, KERN_DEBUG, dev,
29493 "exiting interrupt, csr0=%#4.4x\n",
29494 - lp->a.read_csr(ioaddr, CSR0));
29495 + lp->a->read_csr(ioaddr, CSR0));
29496
29497 spin_unlock(&lp->lock);
29498
29499 @@ -2538,20 +2538,20 @@ static int pcnet32_close(struct net_devi
29500
29501 spin_lock_irqsave(&lp->lock, flags);
29502
29503 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29504 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29505
29506 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29507 "Shutting down ethercard, status was %2.2x\n",
29508 - lp->a.read_csr(ioaddr, CSR0));
29509 + lp->a->read_csr(ioaddr, CSR0));
29510
29511 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29512 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29513 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29514
29515 /*
29516 * Switch back to 16bit mode to avoid problems with dumb
29517 * DOS packet driver after a warm reboot
29518 */
29519 - lp->a.write_bcr(ioaddr, 20, 4);
29520 + lp->a->write_bcr(ioaddr, 20, 4);
29521
29522 spin_unlock_irqrestore(&lp->lock, flags);
29523
29524 @@ -2574,7 +2574,7 @@ static struct net_device_stats *pcnet32_
29525 unsigned long flags;
29526
29527 spin_lock_irqsave(&lp->lock, flags);
29528 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29529 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29530 spin_unlock_irqrestore(&lp->lock, flags);
29531
29532 return &dev->stats;
29533 @@ -2596,10 +2596,10 @@ static void pcnet32_load_multicast(struc
29534 if (dev->flags & IFF_ALLMULTI) {
29535 ib->filter[0] = cpu_to_le32(~0U);
29536 ib->filter[1] = cpu_to_le32(~0U);
29537 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29538 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29539 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29540 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29541 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29542 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29543 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29544 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29545 return;
29546 }
29547 /* clear the multicast filter */
29548 @@ -2619,7 +2619,7 @@ static void pcnet32_load_multicast(struc
29549 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29550 }
29551 for (i = 0; i < 4; i++)
29552 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29553 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29554 le16_to_cpu(mcast_table[i]));
29555 }
29556
29557 @@ -2634,28 +2634,28 @@ static void pcnet32_set_multicast_list(s
29558
29559 spin_lock_irqsave(&lp->lock, flags);
29560 suspended = pcnet32_suspend(dev, &flags, 0);
29561 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29562 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29563 if (dev->flags & IFF_PROMISC) {
29564 /* Log any net taps. */
29565 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29566 lp->init_block->mode =
29567 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29568 7);
29569 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29570 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29571 } else {
29572 lp->init_block->mode =
29573 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29574 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29575 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29576 pcnet32_load_multicast(dev);
29577 }
29578
29579 if (suspended) {
29580 int csr5;
29581 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29582 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29583 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29584 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29585 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29586 } else {
29587 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29588 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29589 pcnet32_restart(dev, CSR0_NORMAL);
29590 netif_wake_queue(dev);
29591 }
29592 @@ -2673,8 +2673,8 @@ static int mdio_read(struct net_device *
29593 if (!lp->mii)
29594 return 0;
29595
29596 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29597 - val_out = lp->a.read_bcr(ioaddr, 34);
29598 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29599 + val_out = lp->a->read_bcr(ioaddr, 34);
29600
29601 return val_out;
29602 }
29603 @@ -2688,8 +2688,8 @@ static void mdio_write(struct net_device
29604 if (!lp->mii)
29605 return;
29606
29607 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29608 - lp->a.write_bcr(ioaddr, 34, val);
29609 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29610 + lp->a->write_bcr(ioaddr, 34, val);
29611 }
29612
29613 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29614 @@ -2766,7 +2766,7 @@ static void pcnet32_check_media(struct n
29615 curr_link = mii_link_ok(&lp->mii_if);
29616 } else {
29617 ulong ioaddr = dev->base_addr; /* card base I/O address */
29618 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29619 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29620 }
29621 if (!curr_link) {
29622 if (prev_link || verbose) {
29623 @@ -2789,13 +2789,13 @@ static void pcnet32_check_media(struct n
29624 (ecmd.duplex == DUPLEX_FULL)
29625 ? "full" : "half");
29626 }
29627 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29628 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29629 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29630 if (lp->mii_if.full_duplex)
29631 bcr9 |= (1 << 0);
29632 else
29633 bcr9 &= ~(1 << 0);
29634 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29635 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29636 }
29637 } else {
29638 netif_info(lp, link, dev, "link up\n");
29639 diff -urNp linux-2.6.39.4/drivers/net/ppp_generic.c linux-2.6.39.4/drivers/net/ppp_generic.c
29640 --- linux-2.6.39.4/drivers/net/ppp_generic.c 2011-05-19 00:06:34.000000000 -0400
29641 +++ linux-2.6.39.4/drivers/net/ppp_generic.c 2011-08-05 19:44:37.000000000 -0400
29642 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29643 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29644 struct ppp_stats stats;
29645 struct ppp_comp_stats cstats;
29646 - char *vers;
29647
29648 switch (cmd) {
29649 case SIOCGPPPSTATS:
29650 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29651 break;
29652
29653 case SIOCGPPPVER:
29654 - vers = PPP_VERSION;
29655 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29656 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29657 break;
29658 err = 0;
29659 break;
29660 diff -urNp linux-2.6.39.4/drivers/net/r8169.c linux-2.6.39.4/drivers/net/r8169.c
29661 --- linux-2.6.39.4/drivers/net/r8169.c 2011-05-19 00:06:34.000000000 -0400
29662 +++ linux-2.6.39.4/drivers/net/r8169.c 2011-08-05 20:34:06.000000000 -0400
29663 @@ -552,12 +552,12 @@ struct rtl8169_private {
29664 struct mdio_ops {
29665 void (*write)(void __iomem *, int, int);
29666 int (*read)(void __iomem *, int);
29667 - } mdio_ops;
29668 + } __no_const mdio_ops;
29669
29670 struct pll_power_ops {
29671 void (*down)(struct rtl8169_private *);
29672 void (*up)(struct rtl8169_private *);
29673 - } pll_power_ops;
29674 + } __no_const pll_power_ops;
29675
29676 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29677 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29678 diff -urNp linux-2.6.39.4/drivers/net/tg3.h linux-2.6.39.4/drivers/net/tg3.h
29679 --- linux-2.6.39.4/drivers/net/tg3.h 2011-05-19 00:06:34.000000000 -0400
29680 +++ linux-2.6.39.4/drivers/net/tg3.h 2011-08-05 19:44:37.000000000 -0400
29681 @@ -131,6 +131,7 @@
29682 #define CHIPREV_ID_5750_A0 0x4000
29683 #define CHIPREV_ID_5750_A1 0x4001
29684 #define CHIPREV_ID_5750_A3 0x4003
29685 +#define CHIPREV_ID_5750_C1 0x4201
29686 #define CHIPREV_ID_5750_C2 0x4202
29687 #define CHIPREV_ID_5752_A0_HW 0x5000
29688 #define CHIPREV_ID_5752_A0 0x6000
29689 diff -urNp linux-2.6.39.4/drivers/net/tokenring/abyss.c linux-2.6.39.4/drivers/net/tokenring/abyss.c
29690 --- linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-05-19 00:06:34.000000000 -0400
29691 +++ linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-08-05 20:34:06.000000000 -0400
29692 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29693
29694 static int __init abyss_init (void)
29695 {
29696 - abyss_netdev_ops = tms380tr_netdev_ops;
29697 + pax_open_kernel();
29698 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29699
29700 - abyss_netdev_ops.ndo_open = abyss_open;
29701 - abyss_netdev_ops.ndo_stop = abyss_close;
29702 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29703 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29704 + pax_close_kernel();
29705
29706 return pci_register_driver(&abyss_driver);
29707 }
29708 diff -urNp linux-2.6.39.4/drivers/net/tokenring/madgemc.c linux-2.6.39.4/drivers/net/tokenring/madgemc.c
29709 --- linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-05-19 00:06:34.000000000 -0400
29710 +++ linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-08-05 20:34:06.000000000 -0400
29711 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29712
29713 static int __init madgemc_init (void)
29714 {
29715 - madgemc_netdev_ops = tms380tr_netdev_ops;
29716 - madgemc_netdev_ops.ndo_open = madgemc_open;
29717 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29718 + pax_open_kernel();
29719 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29720 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29721 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29722 + pax_close_kernel();
29723
29724 return mca_register_driver (&madgemc_driver);
29725 }
29726 diff -urNp linux-2.6.39.4/drivers/net/tokenring/proteon.c linux-2.6.39.4/drivers/net/tokenring/proteon.c
29727 --- linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-05-19 00:06:34.000000000 -0400
29728 +++ linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-08-05 20:34:06.000000000 -0400
29729 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29730 struct platform_device *pdev;
29731 int i, num = 0, err = 0;
29732
29733 - proteon_netdev_ops = tms380tr_netdev_ops;
29734 - proteon_netdev_ops.ndo_open = proteon_open;
29735 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29736 + pax_open_kernel();
29737 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29738 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29739 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29740 + pax_close_kernel();
29741
29742 err = platform_driver_register(&proteon_driver);
29743 if (err)
29744 diff -urNp linux-2.6.39.4/drivers/net/tokenring/skisa.c linux-2.6.39.4/drivers/net/tokenring/skisa.c
29745 --- linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-05-19 00:06:34.000000000 -0400
29746 +++ linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-08-05 20:34:06.000000000 -0400
29747 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29748 struct platform_device *pdev;
29749 int i, num = 0, err = 0;
29750
29751 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29752 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29753 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29754 + pax_open_kernel();
29755 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29756 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29757 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29758 + pax_close_kernel();
29759
29760 err = platform_driver_register(&sk_isa_driver);
29761 if (err)
29762 diff -urNp linux-2.6.39.4/drivers/net/tulip/de2104x.c linux-2.6.39.4/drivers/net/tulip/de2104x.c
29763 --- linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-05-19 00:06:34.000000000 -0400
29764 +++ linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-08-05 19:44:37.000000000 -0400
29765 @@ -1817,6 +1817,8 @@ static void __devinit de21041_get_srom_i
29766 struct de_srom_info_leaf *il;
29767 void *bufp;
29768
29769 + pax_track_stack();
29770 +
29771 /* download entire eeprom */
29772 for (i = 0; i < DE_EEPROM_WORDS; i++)
29773 ((__le16 *)ee_data)[i] =
29774 diff -urNp linux-2.6.39.4/drivers/net/tulip/de4x5.c linux-2.6.39.4/drivers/net/tulip/de4x5.c
29775 --- linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-05-19 00:06:34.000000000 -0400
29776 +++ linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-08-05 19:44:37.000000000 -0400
29777 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29778 for (i=0; i<ETH_ALEN; i++) {
29779 tmp.addr[i] = dev->dev_addr[i];
29780 }
29781 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29782 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29783 break;
29784
29785 case DE4X5_SET_HWADDR: /* Set the hardware address */
29786 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29787 spin_lock_irqsave(&lp->lock, flags);
29788 memcpy(&statbuf, &lp->pktStats, ioc->len);
29789 spin_unlock_irqrestore(&lp->lock, flags);
29790 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29791 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29792 return -EFAULT;
29793 break;
29794 }
29795 diff -urNp linux-2.6.39.4/drivers/net/usb/hso.c linux-2.6.39.4/drivers/net/usb/hso.c
29796 --- linux-2.6.39.4/drivers/net/usb/hso.c 2011-05-19 00:06:34.000000000 -0400
29797 +++ linux-2.6.39.4/drivers/net/usb/hso.c 2011-08-05 19:44:37.000000000 -0400
29798 @@ -71,7 +71,7 @@
29799 #include <asm/byteorder.h>
29800 #include <linux/serial_core.h>
29801 #include <linux/serial.h>
29802 -
29803 +#include <asm/local.h>
29804
29805 #define MOD_AUTHOR "Option Wireless"
29806 #define MOD_DESCRIPTION "USB High Speed Option driver"
29807 @@ -257,7 +257,7 @@ struct hso_serial {
29808
29809 /* from usb_serial_port */
29810 struct tty_struct *tty;
29811 - int open_count;
29812 + local_t open_count;
29813 spinlock_t serial_lock;
29814
29815 int (*write_data) (struct hso_serial *serial);
29816 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29817 struct urb *urb;
29818
29819 urb = serial->rx_urb[0];
29820 - if (serial->open_count > 0) {
29821 + if (local_read(&serial->open_count) > 0) {
29822 count = put_rxbuf_data(urb, serial);
29823 if (count == -1)
29824 return;
29825 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29826 DUMP1(urb->transfer_buffer, urb->actual_length);
29827
29828 /* Anyone listening? */
29829 - if (serial->open_count == 0)
29830 + if (local_read(&serial->open_count) == 0)
29831 return;
29832
29833 if (status == 0) {
29834 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29835 spin_unlock_irq(&serial->serial_lock);
29836
29837 /* check for port already opened, if not set the termios */
29838 - serial->open_count++;
29839 - if (serial->open_count == 1) {
29840 + if (local_inc_return(&serial->open_count) == 1) {
29841 serial->rx_state = RX_IDLE;
29842 /* Force default termio settings */
29843 _hso_serial_set_termios(tty, NULL);
29844 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29845 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29846 if (result) {
29847 hso_stop_serial_device(serial->parent);
29848 - serial->open_count--;
29849 + local_dec(&serial->open_count);
29850 kref_put(&serial->parent->ref, hso_serial_ref_free);
29851 }
29852 } else {
29853 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29854
29855 /* reset the rts and dtr */
29856 /* do the actual close */
29857 - serial->open_count--;
29858 + local_dec(&serial->open_count);
29859
29860 - if (serial->open_count <= 0) {
29861 - serial->open_count = 0;
29862 + if (local_read(&serial->open_count) <= 0) {
29863 + local_set(&serial->open_count, 0);
29864 spin_lock_irq(&serial->serial_lock);
29865 if (serial->tty == tty) {
29866 serial->tty->driver_data = NULL;
29867 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29868
29869 /* the actual setup */
29870 spin_lock_irqsave(&serial->serial_lock, flags);
29871 - if (serial->open_count)
29872 + if (local_read(&serial->open_count))
29873 _hso_serial_set_termios(tty, old);
29874 else
29875 tty->termios = old;
29876 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29877 D1("Pending read interrupt on port %d\n", i);
29878 spin_lock(&serial->serial_lock);
29879 if (serial->rx_state == RX_IDLE &&
29880 - serial->open_count > 0) {
29881 + local_read(&serial->open_count) > 0) {
29882 /* Setup and send a ctrl req read on
29883 * port i */
29884 if (!serial->rx_urb_filled[0]) {
29885 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
29886 /* Start all serial ports */
29887 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29888 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29889 - if (dev2ser(serial_table[i])->open_count) {
29890 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29891 result =
29892 hso_start_serial_device(serial_table[i], GFP_NOIO);
29893 hso_kick_transmit(dev2ser(serial_table[i]));
29894 diff -urNp linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29895 --- linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-19 00:06:34.000000000 -0400
29896 +++ linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-05 19:44:37.000000000 -0400
29897 @@ -631,8 +631,7 @@ vmxnet3_set_rss_indir(struct net_device
29898 * Return with error code if any of the queue indices
29899 * is out of range
29900 */
29901 - if (p->ring_index[i] < 0 ||
29902 - p->ring_index[i] >= adapter->num_rx_queues)
29903 + if (p->ring_index[i] >= adapter->num_rx_queues)
29904 return -EINVAL;
29905 }
29906
29907 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-config.h linux-2.6.39.4/drivers/net/vxge/vxge-config.h
29908 --- linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-05-19 00:06:34.000000000 -0400
29909 +++ linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-08-05 20:34:06.000000000 -0400
29910 @@ -508,7 +508,7 @@ struct vxge_hw_uld_cbs {
29911 void (*link_down)(struct __vxge_hw_device *devh);
29912 void (*crit_err)(struct __vxge_hw_device *devh,
29913 enum vxge_hw_event type, u64 ext_data);
29914 -};
29915 +} __no_const;
29916
29917 /*
29918 * struct __vxge_hw_blockpool_entry - Block private data structure
29919 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-main.c linux-2.6.39.4/drivers/net/vxge/vxge-main.c
29920 --- linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-05-19 00:06:34.000000000 -0400
29921 +++ linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-08-05 19:44:37.000000000 -0400
29922 @@ -97,6 +97,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29923 struct sk_buff *completed[NR_SKB_COMPLETED];
29924 int more;
29925
29926 + pax_track_stack();
29927 +
29928 do {
29929 more = 0;
29930 skb_ptr = completed;
29931 @@ -1927,6 +1929,8 @@ static enum vxge_hw_status vxge_rth_conf
29932 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29933 int index;
29934
29935 + pax_track_stack();
29936 +
29937 /*
29938 * Filling
29939 * - itable with bucket numbers
29940 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h
29941 --- linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-05-19 00:06:34.000000000 -0400
29942 +++ linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:34:06.000000000 -0400
29943 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29944 struct vxge_hw_mempool_dma *dma_object,
29945 u32 index,
29946 u32 is_last);
29947 -};
29948 +} __no_const;
29949
29950 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29951 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29952 diff -urNp linux-2.6.39.4/drivers/net/wan/cycx_x25.c linux-2.6.39.4/drivers/net/wan/cycx_x25.c
29953 --- linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-05-19 00:06:34.000000000 -0400
29954 +++ linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-08-05 19:44:37.000000000 -0400
29955 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29956 unsigned char hex[1024],
29957 * phex = hex;
29958
29959 + pax_track_stack();
29960 +
29961 if (len >= (sizeof(hex) / 2))
29962 len = (sizeof(hex) / 2) - 1;
29963
29964 diff -urNp linux-2.6.39.4/drivers/net/wan/hdlc_x25.c linux-2.6.39.4/drivers/net/wan/hdlc_x25.c
29965 --- linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-05-19 00:06:34.000000000 -0400
29966 +++ linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-08-05 20:34:06.000000000 -0400
29967 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
29968
29969 static int x25_open(struct net_device *dev)
29970 {
29971 - struct lapb_register_struct cb;
29972 + static struct lapb_register_struct cb = {
29973 + .connect_confirmation = x25_connected,
29974 + .connect_indication = x25_connected,
29975 + .disconnect_confirmation = x25_disconnected,
29976 + .disconnect_indication = x25_disconnected,
29977 + .data_indication = x25_data_indication,
29978 + .data_transmit = x25_data_transmit
29979 + };
29980 int result;
29981
29982 - cb.connect_confirmation = x25_connected;
29983 - cb.connect_indication = x25_connected;
29984 - cb.disconnect_confirmation = x25_disconnected;
29985 - cb.disconnect_indication = x25_disconnected;
29986 - cb.data_indication = x25_data_indication;
29987 - cb.data_transmit = x25_data_transmit;
29988 -
29989 result = lapb_register(dev, &cb);
29990 if (result != LAPB_OK)
29991 return result;
29992 diff -urNp linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c
29993 --- linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-05-19 00:06:34.000000000 -0400
29994 +++ linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-05 19:44:37.000000000 -0400
29995 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
29996 int do_autopm = 1;
29997 DECLARE_COMPLETION_ONSTACK(notif_completion);
29998
29999 + pax_track_stack();
30000 +
30001 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30002 i2400m, ack, ack_size);
30003 BUG_ON(_ack == i2400m->bm_ack_buf);
30004 diff -urNp linux-2.6.39.4/drivers/net/wireless/airo.c linux-2.6.39.4/drivers/net/wireless/airo.c
30005 --- linux-2.6.39.4/drivers/net/wireless/airo.c 2011-05-19 00:06:34.000000000 -0400
30006 +++ linux-2.6.39.4/drivers/net/wireless/airo.c 2011-08-05 19:44:37.000000000 -0400
30007 @@ -3001,6 +3001,8 @@ static void airo_process_scan_results (s
30008 BSSListElement * loop_net;
30009 BSSListElement * tmp_net;
30010
30011 + pax_track_stack();
30012 +
30013 /* Blow away current list of scan results */
30014 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30015 list_move_tail (&loop_net->list, &ai->network_free_list);
30016 @@ -3792,6 +3794,8 @@ static u16 setup_card(struct airo_info *
30017 WepKeyRid wkr;
30018 int rc;
30019
30020 + pax_track_stack();
30021 +
30022 memset( &mySsid, 0, sizeof( mySsid ) );
30023 kfree (ai->flash);
30024 ai->flash = NULL;
30025 @@ -4760,6 +4764,8 @@ static int proc_stats_rid_open( struct i
30026 __le32 *vals = stats.vals;
30027 int len;
30028
30029 + pax_track_stack();
30030 +
30031 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30032 return -ENOMEM;
30033 data = file->private_data;
30034 @@ -5483,6 +5489,8 @@ static int proc_BSSList_open( struct ino
30035 /* If doLoseSync is not 1, we won't do a Lose Sync */
30036 int doLoseSync = -1;
30037
30038 + pax_track_stack();
30039 +
30040 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30041 return -ENOMEM;
30042 data = file->private_data;
30043 @@ -7190,6 +7198,8 @@ static int airo_get_aplist(struct net_de
30044 int i;
30045 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30046
30047 + pax_track_stack();
30048 +
30049 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30050 if (!qual)
30051 return -ENOMEM;
30052 @@ -7750,6 +7760,8 @@ static void airo_read_wireless_stats(str
30053 CapabilityRid cap_rid;
30054 __le32 *vals = stats_rid.vals;
30055
30056 + pax_track_stack();
30057 +
30058 /* Get stats out of the card */
30059 clear_bit(JOB_WSTATS, &local->jobs);
30060 if (local->power.event) {
30061 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c
30062 --- linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-05-19 00:06:34.000000000 -0400
30063 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-05 19:44:37.000000000 -0400
30064 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30065 unsigned int v;
30066 u64 tsf;
30067
30068 + pax_track_stack();
30069 +
30070 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30071 len += snprintf(buf+len, sizeof(buf)-len,
30072 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30073 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30074 unsigned int len = 0;
30075 unsigned int i;
30076
30077 + pax_track_stack();
30078 +
30079 len += snprintf(buf+len, sizeof(buf)-len,
30080 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30081
30082 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30083 unsigned int i;
30084 unsigned int v;
30085
30086 + pax_track_stack();
30087 +
30088 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30089 sc->ah->ah_ant_mode);
30090 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30091 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30092 unsigned int len = 0;
30093 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30094
30095 + pax_track_stack();
30096 +
30097 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30098 sc->bssidmask);
30099 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30100 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30101 unsigned int len = 0;
30102 int i;
30103
30104 + pax_track_stack();
30105 +
30106 len += snprintf(buf+len, sizeof(buf)-len,
30107 "RX\n---------------------\n");
30108 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30109 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30110 char buf[700];
30111 unsigned int len = 0;
30112
30113 + pax_track_stack();
30114 +
30115 len += snprintf(buf+len, sizeof(buf)-len,
30116 "HW has PHY error counters:\t%s\n",
30117 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30118 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30119 struct ath5k_buf *bf, *bf0;
30120 int i, n;
30121
30122 + pax_track_stack();
30123 +
30124 len += snprintf(buf+len, sizeof(buf)-len,
30125 "available txbuffers: %d\n", sc->txbuf_len);
30126
30127 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30128 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-19 00:06:34.000000000 -0400
30129 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-05 19:44:37.000000000 -0400
30130 @@ -734,6 +734,8 @@ static void ar9003_hw_tx_iq_cal(struct a
30131 s32 i, j, ip, im, nmeasurement;
30132 u8 nchains = get_streams(common->tx_chainmask);
30133
30134 + pax_track_stack();
30135 +
30136 for (ip = 0; ip < MPASS; ip++) {
30137 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
30138 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
30139 @@ -856,6 +858,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30140 int i, ip, im, j;
30141 int nmeasurement;
30142
30143 + pax_track_stack();
30144 +
30145 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30146 if (ah->txchainmask & (1 << i))
30147 num_chains++;
30148 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30149 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-19 00:06:34.000000000 -0400
30150 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-05 19:44:37.000000000 -0400
30151 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30152 int theta_low_bin = 0;
30153 int i;
30154
30155 + pax_track_stack();
30156 +
30157 /* disregard any bin that contains <= 16 samples */
30158 thresh_accum_cnt = 16;
30159 scale_factor = 5;
30160 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c
30161 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-05-19 00:06:34.000000000 -0400
30162 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-05 19:44:37.000000000 -0400
30163 @@ -335,6 +335,8 @@ static ssize_t read_file_interrupt(struc
30164 char buf[512];
30165 unsigned int len = 0;
30166
30167 + pax_track_stack();
30168 +
30169 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30170 len += snprintf(buf + len, sizeof(buf) - len,
30171 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30172 @@ -422,6 +424,8 @@ static ssize_t read_file_wiphy(struct fi
30173 u8 addr[ETH_ALEN];
30174 u32 tmp;
30175
30176 + pax_track_stack();
30177 +
30178 len += snprintf(buf + len, sizeof(buf) - len,
30179 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30180 wiphy_name(sc->hw->wiphy),
30181 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c
30182 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-19 00:06:34.000000000 -0400
30183 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-08-05 20:34:06.000000000 -0400
30184 @@ -737,6 +737,8 @@ static ssize_t read_file_tgt_stats(struc
30185 unsigned int len = 0;
30186 int ret = 0;
30187
30188 + pax_track_stack();
30189 +
30190 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30191
30192 WMI_CMD(WMI_TGT_STATS_CMDID);
30193 @@ -782,6 +784,8 @@ static ssize_t read_file_xmit(struct fil
30194 char buf[512];
30195 unsigned int len = 0;
30196
30197 + pax_track_stack();
30198 +
30199 len += snprintf(buf + len, sizeof(buf) - len,
30200 "%20s : %10u\n", "Buffers queued",
30201 priv->debug.tx_stats.buf_queued);
30202 @@ -831,6 +835,8 @@ static ssize_t read_file_recv(struct fil
30203 char buf[512];
30204 unsigned int len = 0;
30205
30206 + pax_track_stack();
30207 +
30208 len += snprintf(buf + len, sizeof(buf) - len,
30209 "%20s : %10u\n", "SKBs allocated",
30210 priv->debug.rx_stats.skb_allocated);
30211 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h
30212 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-05-19 00:06:34.000000000 -0400
30213 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-05 20:34:06.000000000 -0400
30214 @@ -592,7 +592,7 @@ struct ath_hw_private_ops {
30215
30216 /* ANI */
30217 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30218 -};
30219 +} __no_const;
30220
30221 /**
30222 * struct ath_hw_ops - callbacks used by hardware code and driver code
30223 @@ -642,7 +642,7 @@ struct ath_hw_ops {
30224 u32 burstDuration);
30225 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
30226 u32 vmf);
30227 -};
30228 +} __no_const;
30229
30230 struct ath_nf_limits {
30231 s16 max;
30232 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c
30233 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-19 00:06:34.000000000 -0400
30234 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-05 19:44:37.000000000 -0400
30235 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30236 int err;
30237 DECLARE_SSID_BUF(ssid);
30238
30239 + pax_track_stack();
30240 +
30241 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30242
30243 if (ssid_len)
30244 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30245 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30246 int err;
30247
30248 + pax_track_stack();
30249 +
30250 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30251 idx, keylen, len);
30252
30253 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30254 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-19 00:06:34.000000000 -0400
30255 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-05 19:44:37.000000000 -0400
30256 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30257 unsigned long flags;
30258 DECLARE_SSID_BUF(ssid);
30259
30260 + pax_track_stack();
30261 +
30262 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30263 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30264 print_ssid(ssid, info_element->data, info_element->len),
30265 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30266 --- linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-05-19 00:06:34.000000000 -0400
30267 +++ linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-05 20:34:06.000000000 -0400
30268 @@ -3958,7 +3958,9 @@ static int iwl3945_pci_probe(struct pci_
30269 */
30270 if (iwl3945_mod_params.disable_hw_scan) {
30271 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30272 - iwl3945_hw_ops.hw_scan = NULL;
30273 + pax_open_kernel();
30274 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30275 + pax_close_kernel();
30276 }
30277
30278 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30279 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c
30280 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-06-25 12:55:22.000000000 -0400
30281 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:34:06.000000000 -0400
30282 @@ -3974,7 +3974,9 @@ static int iwl_pci_probe(struct pci_dev
30283 if (cfg->mod_params->disable_hw_scan) {
30284 dev_printk(KERN_DEBUG, &(pdev->dev),
30285 "sw scan support is deprecated\n");
30286 - iwlagn_hw_ops.hw_scan = NULL;
30287 + pax_open_kernel();
30288 + *(void **)&iwlagn_hw_ops.hw_scan = NULL;
30289 + pax_close_kernel();
30290 }
30291
30292 hw = iwl_alloc_all(cfg);
30293 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30294 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-19 00:06:34.000000000 -0400
30295 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-05 19:44:37.000000000 -0400
30296 @@ -883,6 +883,8 @@ static void rs_tx_status(void *priv_r, s
30297 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30298 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30299
30300 + pax_track_stack();
30301 +
30302 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30303
30304 /* Treat uninitialized rate scaling data same as non-existing. */
30305 @@ -2894,6 +2896,8 @@ static void rs_fill_link_cmd(struct iwl_
30306 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30307 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30308
30309 + pax_track_stack();
30310 +
30311 /* Override starting rate (index 0) if needed for debug purposes */
30312 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30313
30314 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30315 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-19 00:06:34.000000000 -0400
30316 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-05 19:44:37.000000000 -0400
30317 @@ -549,6 +549,8 @@ static ssize_t iwl_dbgfs_status_read(str
30318 int pos = 0;
30319 const size_t bufsz = sizeof(buf);
30320
30321 + pax_track_stack();
30322 +
30323 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30324 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30325 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30326 @@ -681,6 +683,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30327 char buf[256 * NUM_IWL_RXON_CTX];
30328 const size_t bufsz = sizeof(buf);
30329
30330 + pax_track_stack();
30331 +
30332 for_each_context(priv, ctx) {
30333 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30334 ctx->ctxid);
30335 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30336 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-19 00:06:34.000000000 -0400
30337 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-05 19:44:37.000000000 -0400
30338 @@ -68,8 +68,8 @@ do {
30339 } while (0)
30340
30341 #else
30342 -#define IWL_DEBUG(__priv, level, fmt, args...)
30343 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30344 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30345 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30346 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30347 const void *p, u32 len)
30348 {}
30349 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30350 --- linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-19 00:06:34.000000000 -0400
30351 +++ linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-05 19:44:37.000000000 -0400
30352 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30353 int buf_len = 512;
30354 size_t len = 0;
30355
30356 + pax_track_stack();
30357 +
30358 if (*ppos != 0)
30359 return 0;
30360 if (count < sizeof(buf))
30361 diff -urNp linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c
30362 --- linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-05-19 00:06:34.000000000 -0400
30363 +++ linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-05 20:34:06.000000000 -0400
30364 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30365 return -EINVAL;
30366
30367 if (fake_hw_scan) {
30368 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30369 - mac80211_hwsim_ops.sw_scan_start = NULL;
30370 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30371 + pax_open_kernel();
30372 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30373 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30374 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30375 + pax_close_kernel();
30376 }
30377
30378 spin_lock_init(&hwsim_radio_lock);
30379 diff -urNp linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c
30380 --- linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-05-19 00:06:34.000000000 -0400
30381 +++ linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-08-05 19:44:37.000000000 -0400
30382 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30383
30384 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30385
30386 - if (rts_threshold < 0 || rts_threshold > 2347)
30387 + if (rts_threshold > 2347)
30388 rts_threshold = 2347;
30389
30390 tmp = cpu_to_le32(rts_threshold);
30391 diff -urNp linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30392 --- linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-19 00:06:34.000000000 -0400
30393 +++ linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-05 19:44:37.000000000 -0400
30394 @@ -827,6 +827,8 @@ static bool _rtl92c_phy_sw_chnl_step_by_
30395 u8 rfpath;
30396 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30397
30398 + pax_track_stack();
30399 +
30400 precommoncmdcnt = 0;
30401 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30402 MAX_PRECMD_CNT,
30403 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h
30404 --- linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-05-19 00:06:34.000000000 -0400
30405 +++ linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-05 20:34:06.000000000 -0400
30406 @@ -260,7 +260,7 @@ struct wl1251_if_operations {
30407 void (*reset)(struct wl1251 *wl);
30408 void (*enable_irq)(struct wl1251 *wl);
30409 void (*disable_irq)(struct wl1251 *wl);
30410 -};
30411 +} __no_const;
30412
30413 struct wl1251 {
30414 struct ieee80211_hw *hw;
30415 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c
30416 --- linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-05-19 00:06:34.000000000 -0400
30417 +++ linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-08-05 19:44:37.000000000 -0400
30418 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30419 u32 chunk_len;
30420 int i;
30421
30422 + pax_track_stack();
30423 +
30424 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30425
30426 spi_message_init(&m);
30427 diff -urNp linux-2.6.39.4/drivers/oprofile/buffer_sync.c linux-2.6.39.4/drivers/oprofile/buffer_sync.c
30428 --- linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-06-25 12:55:22.000000000 -0400
30429 +++ linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-08-05 19:44:37.000000000 -0400
30430 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30431 if (cookie == NO_COOKIE)
30432 offset = pc;
30433 if (cookie == INVALID_COOKIE) {
30434 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30435 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30436 offset = pc;
30437 }
30438 if (cookie != last_cookie) {
30439 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30440 /* add userspace sample */
30441
30442 if (!mm) {
30443 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30444 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30445 return 0;
30446 }
30447
30448 cookie = lookup_dcookie(mm, s->eip, &offset);
30449
30450 if (cookie == INVALID_COOKIE) {
30451 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30452 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30453 return 0;
30454 }
30455
30456 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30457 /* ignore backtraces if failed to add a sample */
30458 if (state == sb_bt_start) {
30459 state = sb_bt_ignore;
30460 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30461 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30462 }
30463 }
30464 release_mm(mm);
30465 diff -urNp linux-2.6.39.4/drivers/oprofile/event_buffer.c linux-2.6.39.4/drivers/oprofile/event_buffer.c
30466 --- linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-05-19 00:06:34.000000000 -0400
30467 +++ linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-08-05 19:44:37.000000000 -0400
30468 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30469 }
30470
30471 if (buffer_pos == buffer_size) {
30472 - atomic_inc(&oprofile_stats.event_lost_overflow);
30473 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30474 return;
30475 }
30476
30477 diff -urNp linux-2.6.39.4/drivers/oprofile/oprof.c linux-2.6.39.4/drivers/oprofile/oprof.c
30478 --- linux-2.6.39.4/drivers/oprofile/oprof.c 2011-05-19 00:06:34.000000000 -0400
30479 +++ linux-2.6.39.4/drivers/oprofile/oprof.c 2011-08-05 19:44:37.000000000 -0400
30480 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30481 if (oprofile_ops.switch_events())
30482 return;
30483
30484 - atomic_inc(&oprofile_stats.multiplex_counter);
30485 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30486 start_switch_worker();
30487 }
30488
30489 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofilefs.c linux-2.6.39.4/drivers/oprofile/oprofilefs.c
30490 --- linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-05-19 00:06:34.000000000 -0400
30491 +++ linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-08-05 19:44:37.000000000 -0400
30492 @@ -186,7 +186,7 @@ static const struct file_operations atom
30493
30494
30495 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30496 - char const *name, atomic_t *val)
30497 + char const *name, atomic_unchecked_t *val)
30498 {
30499 return __oprofilefs_create_file(sb, root, name,
30500 &atomic_ro_fops, 0444, val);
30501 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.c linux-2.6.39.4/drivers/oprofile/oprofile_stats.c
30502 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-05-19 00:06:34.000000000 -0400
30503 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-08-05 19:44:37.000000000 -0400
30504 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30505 cpu_buf->sample_invalid_eip = 0;
30506 }
30507
30508 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30509 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30510 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30511 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30512 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30513 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30514 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30515 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30516 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30517 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30518 }
30519
30520
30521 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.h linux-2.6.39.4/drivers/oprofile/oprofile_stats.h
30522 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-05-19 00:06:34.000000000 -0400
30523 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-08-05 19:44:37.000000000 -0400
30524 @@ -13,11 +13,11 @@
30525 #include <asm/atomic.h>
30526
30527 struct oprofile_stat_struct {
30528 - atomic_t sample_lost_no_mm;
30529 - atomic_t sample_lost_no_mapping;
30530 - atomic_t bt_lost_no_mapping;
30531 - atomic_t event_lost_overflow;
30532 - atomic_t multiplex_counter;
30533 + atomic_unchecked_t sample_lost_no_mm;
30534 + atomic_unchecked_t sample_lost_no_mapping;
30535 + atomic_unchecked_t bt_lost_no_mapping;
30536 + atomic_unchecked_t event_lost_overflow;
30537 + atomic_unchecked_t multiplex_counter;
30538 };
30539
30540 extern struct oprofile_stat_struct oprofile_stats;
30541 diff -urNp linux-2.6.39.4/drivers/parport/procfs.c linux-2.6.39.4/drivers/parport/procfs.c
30542 --- linux-2.6.39.4/drivers/parport/procfs.c 2011-05-19 00:06:34.000000000 -0400
30543 +++ linux-2.6.39.4/drivers/parport/procfs.c 2011-08-05 19:44:37.000000000 -0400
30544 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30545
30546 *ppos += len;
30547
30548 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30549 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30550 }
30551
30552 #ifdef CONFIG_PARPORT_1284
30553 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30554
30555 *ppos += len;
30556
30557 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30558 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30559 }
30560 #endif /* IEEE1284.3 support. */
30561
30562 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h
30563 --- linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-05-19 00:06:34.000000000 -0400
30564 +++ linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:34:06.000000000 -0400
30565 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30566 int (*hardware_test) (struct slot* slot, u32 value);
30567 u8 (*get_power) (struct slot* slot);
30568 int (*set_power) (struct slot* slot, int value);
30569 -};
30570 +} __no_const;
30571
30572 struct cpci_hp_controller {
30573 unsigned int irq;
30574 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c
30575 --- linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-19 00:06:34.000000000 -0400
30576 +++ linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-05 19:44:37.000000000 -0400
30577 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30578
30579 void compaq_nvram_init (void __iomem *rom_start)
30580 {
30581 +
30582 +#ifndef CONFIG_PAX_KERNEXEC
30583 if (rom_start) {
30584 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30585 }
30586 +#endif
30587 +
30588 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30589
30590 /* initialize our int15 lock */
30591 diff -urNp linux-2.6.39.4/drivers/pci/pcie/aspm.c linux-2.6.39.4/drivers/pci/pcie/aspm.c
30592 --- linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-05-19 00:06:34.000000000 -0400
30593 +++ linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-08-05 19:44:37.000000000 -0400
30594 @@ -27,9 +27,9 @@
30595 #define MODULE_PARAM_PREFIX "pcie_aspm."
30596
30597 /* Note: those are not register definitions */
30598 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30599 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30600 -#define ASPM_STATE_L1 (4) /* L1 state */
30601 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30602 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30603 +#define ASPM_STATE_L1 (4U) /* L1 state */
30604 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30605 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30606
30607 diff -urNp linux-2.6.39.4/drivers/pci/probe.c linux-2.6.39.4/drivers/pci/probe.c
30608 --- linux-2.6.39.4/drivers/pci/probe.c 2011-05-19 00:06:34.000000000 -0400
30609 +++ linux-2.6.39.4/drivers/pci/probe.c 2011-08-05 20:34:06.000000000 -0400
30610 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
30611 return ret;
30612 }
30613
30614 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
30615 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
30616 struct device_attribute *attr,
30617 char *buf)
30618 {
30619 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
30620 }
30621
30622 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
30623 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
30624 struct device_attribute *attr,
30625 char *buf)
30626 {
30627 @@ -165,7 +165,7 @@ int __pci_read_base(struct pci_dev *dev,
30628 u32 l, sz, mask;
30629 u16 orig_cmd;
30630
30631 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30632 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30633
30634 if (!dev->mmio_always_on) {
30635 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30636 diff -urNp linux-2.6.39.4/drivers/pci/proc.c linux-2.6.39.4/drivers/pci/proc.c
30637 --- linux-2.6.39.4/drivers/pci/proc.c 2011-05-19 00:06:34.000000000 -0400
30638 +++ linux-2.6.39.4/drivers/pci/proc.c 2011-08-05 19:44:37.000000000 -0400
30639 @@ -476,7 +476,16 @@ static const struct file_operations proc
30640 static int __init pci_proc_init(void)
30641 {
30642 struct pci_dev *dev = NULL;
30643 +
30644 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30645 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30646 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30647 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30648 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30649 +#endif
30650 +#else
30651 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30652 +#endif
30653 proc_create("devices", 0, proc_bus_pci_dir,
30654 &proc_bus_pci_dev_operations);
30655 proc_initialized = 1;
30656 diff -urNp linux-2.6.39.4/drivers/pci/xen-pcifront.c linux-2.6.39.4/drivers/pci/xen-pcifront.c
30657 --- linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-05-19 00:06:34.000000000 -0400
30658 +++ linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-08-05 20:34:06.000000000 -0400
30659 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30660 struct pcifront_sd *sd = bus->sysdata;
30661 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30662
30663 + pax_track_stack();
30664 +
30665 if (verbose_request)
30666 dev_info(&pdev->xdev->dev,
30667 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30668 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30669 struct pcifront_sd *sd = bus->sysdata;
30670 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30671
30672 + pax_track_stack();
30673 +
30674 if (verbose_request)
30675 dev_info(&pdev->xdev->dev,
30676 "write dev=%04x:%02x:%02x.%01x - "
30677 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30678 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30679 struct msi_desc *entry;
30680
30681 + pax_track_stack();
30682 +
30683 if (nvec > SH_INFO_MAX_VEC) {
30684 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30685 " Increase SH_INFO_MAX_VEC.\n", nvec);
30686 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30687 struct pcifront_sd *sd = dev->bus->sysdata;
30688 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30689
30690 + pax_track_stack();
30691 +
30692 err = do_pci_op(pdev, &op);
30693
30694 /* What should do for error ? */
30695 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30696 struct pcifront_sd *sd = dev->bus->sysdata;
30697 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30698
30699 + pax_track_stack();
30700 +
30701 err = do_pci_op(pdev, &op);
30702 if (likely(!err)) {
30703 vector[0] = op.value;
30704 diff -urNp linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c
30705 --- linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-05-19 00:06:34.000000000 -0400
30706 +++ linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:34:06.000000000 -0400
30707 @@ -2109,7 +2109,7 @@ static int hotkey_mask_get(void)
30708 return 0;
30709 }
30710
30711 -void static hotkey_mask_warn_incomplete_mask(void)
30712 +static void hotkey_mask_warn_incomplete_mask(void)
30713 {
30714 /* log only what the user can fix... */
30715 const u32 wantedmask = hotkey_driver_mask &
30716 diff -urNp linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c
30717 --- linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-05-19 00:06:34.000000000 -0400
30718 +++ linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-05 19:44:37.000000000 -0400
30719 @@ -59,7 +59,7 @@ do { \
30720 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30721 } while(0)
30722
30723 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30724 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30725 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30726
30727 /*
30728 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30729
30730 cpu = get_cpu();
30731 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30732 +
30733 + pax_open_kernel();
30734 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30735 + pax_close_kernel();
30736
30737 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30738 spin_lock_irqsave(&pnp_bios_lock, flags);
30739 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30740 :"memory");
30741 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30742
30743 + pax_open_kernel();
30744 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30745 + pax_close_kernel();
30746 +
30747 put_cpu();
30748
30749 /* If we get here and this is set then the PnP BIOS faulted on us. */
30750 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30751 return status;
30752 }
30753
30754 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30755 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30756 {
30757 int i;
30758
30759 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30760 pnp_bios_callpoint.offset = header->fields.pm16offset;
30761 pnp_bios_callpoint.segment = PNP_CS16;
30762
30763 + pax_open_kernel();
30764 +
30765 for_each_possible_cpu(i) {
30766 struct desc_struct *gdt = get_cpu_gdt_table(i);
30767 if (!gdt)
30768 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30769 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30770 (unsigned long)__va(header->fields.pm16dseg));
30771 }
30772 +
30773 + pax_close_kernel();
30774 }
30775 diff -urNp linux-2.6.39.4/drivers/pnp/resource.c linux-2.6.39.4/drivers/pnp/resource.c
30776 --- linux-2.6.39.4/drivers/pnp/resource.c 2011-05-19 00:06:34.000000000 -0400
30777 +++ linux-2.6.39.4/drivers/pnp/resource.c 2011-08-05 19:44:37.000000000 -0400
30778 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30779 return 1;
30780
30781 /* check if the resource is valid */
30782 - if (*irq < 0 || *irq > 15)
30783 + if (*irq > 15)
30784 return 0;
30785
30786 /* check if the resource is reserved */
30787 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30788 return 1;
30789
30790 /* check if the resource is valid */
30791 - if (*dma < 0 || *dma == 4 || *dma > 7)
30792 + if (*dma == 4 || *dma > 7)
30793 return 0;
30794
30795 /* check if the resource is reserved */
30796 diff -urNp linux-2.6.39.4/drivers/power/bq27x00_battery.c linux-2.6.39.4/drivers/power/bq27x00_battery.c
30797 --- linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-05-19 00:06:34.000000000 -0400
30798 +++ linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-08-05 20:34:06.000000000 -0400
30799 @@ -66,7 +66,7 @@
30800 struct bq27x00_device_info;
30801 struct bq27x00_access_methods {
30802 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30803 -};
30804 +} __no_const;
30805
30806 enum bq27x00_chip { BQ27000, BQ27500 };
30807
30808 diff -urNp linux-2.6.39.4/drivers/regulator/max8660.c linux-2.6.39.4/drivers/regulator/max8660.c
30809 --- linux-2.6.39.4/drivers/regulator/max8660.c 2011-05-19 00:06:34.000000000 -0400
30810 +++ linux-2.6.39.4/drivers/regulator/max8660.c 2011-08-05 20:34:06.000000000 -0400
30811 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30812 max8660->shadow_regs[MAX8660_OVER1] = 5;
30813 } else {
30814 /* Otherwise devices can be toggled via software */
30815 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30816 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30817 + pax_open_kernel();
30818 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30819 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30820 + pax_close_kernel();
30821 }
30822
30823 /*
30824 diff -urNp linux-2.6.39.4/drivers/regulator/mc13892-regulator.c linux-2.6.39.4/drivers/regulator/mc13892-regulator.c
30825 --- linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-05-19 00:06:34.000000000 -0400
30826 +++ linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-08-05 20:34:06.000000000 -0400
30827 @@ -560,10 +560,12 @@ static int __devinit mc13892_regulator_p
30828 }
30829 mc13xxx_unlock(mc13892);
30830
30831 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30832 + pax_open_kernel();
30833 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30834 = mc13892_vcam_set_mode;
30835 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30836 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30837 = mc13892_vcam_get_mode;
30838 + pax_close_kernel();
30839 for (i = 0; i < pdata->num_regulators; i++) {
30840 init_data = &pdata->regulators[i];
30841 priv->regulators[i] = regulator_register(
30842 diff -urNp linux-2.6.39.4/drivers/rtc/rtc-dev.c linux-2.6.39.4/drivers/rtc/rtc-dev.c
30843 --- linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-05-19 00:06:34.000000000 -0400
30844 +++ linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-08-05 19:44:37.000000000 -0400
30845 @@ -14,6 +14,7 @@
30846 #include <linux/module.h>
30847 #include <linux/rtc.h>
30848 #include <linux/sched.h>
30849 +#include <linux/grsecurity.h>
30850 #include "rtc-core.h"
30851
30852 static dev_t rtc_devt;
30853 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30854 if (copy_from_user(&tm, uarg, sizeof(tm)))
30855 return -EFAULT;
30856
30857 + gr_log_timechange();
30858 +
30859 return rtc_set_time(rtc, &tm);
30860
30861 case RTC_PIE_ON:
30862 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h
30863 --- linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-05-19 00:06:34.000000000 -0400
30864 +++ linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:34:06.000000000 -0400
30865 @@ -492,7 +492,7 @@ struct adapter_ops
30866 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30867 /* Administrative operations */
30868 int (*adapter_comm)(struct aac_dev * dev, int comm);
30869 -};
30870 +} __no_const;
30871
30872 /*
30873 * Define which interrupt handler needs to be installed
30874 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c
30875 --- linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-05-19 00:06:34.000000000 -0400
30876 +++ linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-08-05 19:44:37.000000000 -0400
30877 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30878 u32 actual_fibsize64, actual_fibsize = 0;
30879 int i;
30880
30881 + pax_track_stack();
30882
30883 if (dev->in_reset) {
30884 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30885 diff -urNp linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c
30886 --- linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-19 00:06:34.000000000 -0400
30887 +++ linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-08-05 19:44:37.000000000 -0400
30888 @@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(stru
30889 flash_error_table[i].reason);
30890 }
30891
30892 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
30893 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
30894 asd_show_update_bios, asd_store_update_bios);
30895
30896 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
30897 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfad.c linux-2.6.39.4/drivers/scsi/bfa/bfad.c
30898 --- linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-05-19 00:06:34.000000000 -0400
30899 +++ linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-08-05 19:44:37.000000000 -0400
30900 @@ -1027,6 +1027,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30901 struct bfad_vport_s *vport, *vport_new;
30902 struct bfa_fcs_driver_info_s driver_info;
30903
30904 + pax_track_stack();
30905 +
30906 /* Fill the driver_info info to fcs*/
30907 memset(&driver_info, 0, sizeof(driver_info));
30908 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30909 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c
30910 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-19 00:06:34.000000000 -0400
30911 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-05 19:44:37.000000000 -0400
30912 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30913 u16 len, count;
30914 u16 templen;
30915
30916 + pax_track_stack();
30917 +
30918 /*
30919 * get hba attributes
30920 */
30921 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30922 u8 count = 0;
30923 u16 templen;
30924
30925 + pax_track_stack();
30926 +
30927 /*
30928 * get port attributes
30929 */
30930 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c
30931 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-19 00:06:34.000000000 -0400
30932 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-05 19:44:37.000000000 -0400
30933 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30934 struct fc_rpsc_speed_info_s speeds;
30935 struct bfa_port_attr_s pport_attr;
30936
30937 + pax_track_stack();
30938 +
30939 bfa_trc(port->fcs, rx_fchs->s_id);
30940 bfa_trc(port->fcs, rx_fchs->d_id);
30941
30942 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa.h linux-2.6.39.4/drivers/scsi/bfa/bfa.h
30943 --- linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-05-19 00:06:34.000000000 -0400
30944 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-08-05 20:34:06.000000000 -0400
30945 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30946 u32 *nvecs, u32 *maxvec);
30947 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30948 u32 *end);
30949 -};
30950 +} __no_const;
30951 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30952
30953 struct bfa_iocfc_s {
30954 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h
30955 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-05-19 00:06:34.000000000 -0400
30956 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:34:06.000000000 -0400
30957 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30958 bfa_ioc_disable_cbfn_t disable_cbfn;
30959 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
30960 bfa_ioc_reset_cbfn_t reset_cbfn;
30961 -};
30962 +} __no_const;
30963
30964 /*
30965 * Heartbeat failure notification queue element.
30966 @@ -267,7 +267,7 @@ struct bfa_ioc_hwif_s {
30967 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
30968 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
30969 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
30970 -};
30971 +} __no_const;
30972
30973 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
30974 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
30975 diff -urNp linux-2.6.39.4/drivers/scsi/BusLogic.c linux-2.6.39.4/drivers/scsi/BusLogic.c
30976 --- linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-05-19 00:06:34.000000000 -0400
30977 +++ linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-08-05 19:44:37.000000000 -0400
30978 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
30979 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
30980 *PrototypeHostAdapter)
30981 {
30982 + pax_track_stack();
30983 +
30984 /*
30985 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
30986 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
30987 diff -urNp linux-2.6.39.4/drivers/scsi/dpt_i2o.c linux-2.6.39.4/drivers/scsi/dpt_i2o.c
30988 --- linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-05-19 00:06:34.000000000 -0400
30989 +++ linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-08-05 19:44:37.000000000 -0400
30990 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
30991 dma_addr_t addr;
30992 ulong flags = 0;
30993
30994 + pax_track_stack();
30995 +
30996 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
30997 // get user msg size in u32s
30998 if(get_user(size, &user_msg[0])){
30999 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31000 s32 rcode;
31001 dma_addr_t addr;
31002
31003 + pax_track_stack();
31004 +
31005 memset(msg, 0 , sizeof(msg));
31006 len = scsi_bufflen(cmd);
31007 direction = 0x00000000;
31008 diff -urNp linux-2.6.39.4/drivers/scsi/eata.c linux-2.6.39.4/drivers/scsi/eata.c
31009 --- linux-2.6.39.4/drivers/scsi/eata.c 2011-05-19 00:06:34.000000000 -0400
31010 +++ linux-2.6.39.4/drivers/scsi/eata.c 2011-08-05 19:44:37.000000000 -0400
31011 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31012 struct hostdata *ha;
31013 char name[16];
31014
31015 + pax_track_stack();
31016 +
31017 sprintf(name, "%s%d", driver_name, j);
31018
31019 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31020 diff -urNp linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c
31021 --- linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-19 00:06:34.000000000 -0400
31022 +++ linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-05 20:34:06.000000000 -0400
31023 @@ -2458,6 +2458,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31024 } buf;
31025 int rc;
31026
31027 + pax_track_stack();
31028 +
31029 fiph = (struct fip_header *)skb->data;
31030 sub = fiph->fip_subcode;
31031
31032 diff -urNp linux-2.6.39.4/drivers/scsi/gdth.c linux-2.6.39.4/drivers/scsi/gdth.c
31033 --- linux-2.6.39.4/drivers/scsi/gdth.c 2011-05-19 00:06:34.000000000 -0400
31034 +++ linux-2.6.39.4/drivers/scsi/gdth.c 2011-08-05 19:44:37.000000000 -0400
31035 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31036 unsigned long flags;
31037 gdth_ha_str *ha;
31038
31039 + pax_track_stack();
31040 +
31041 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31042 return -EFAULT;
31043 ha = gdth_find_ha(ldrv.ionode);
31044 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31045 gdth_ha_str *ha;
31046 int rval;
31047
31048 + pax_track_stack();
31049 +
31050 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31051 res.number >= MAX_HDRIVES)
31052 return -EFAULT;
31053 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31054 gdth_ha_str *ha;
31055 int rval;
31056
31057 + pax_track_stack();
31058 +
31059 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31060 return -EFAULT;
31061 ha = gdth_find_ha(gen.ionode);
31062 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31063 int i;
31064 gdth_cmd_str gdtcmd;
31065 char cmnd[MAX_COMMAND_SIZE];
31066 +
31067 + pax_track_stack();
31068 +
31069 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31070
31071 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31072 diff -urNp linux-2.6.39.4/drivers/scsi/gdth_proc.c linux-2.6.39.4/drivers/scsi/gdth_proc.c
31073 --- linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-05-19 00:06:34.000000000 -0400
31074 +++ linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-08-05 19:44:37.000000000 -0400
31075 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31076 u64 paddr;
31077
31078 char cmnd[MAX_COMMAND_SIZE];
31079 +
31080 + pax_track_stack();
31081 +
31082 memset(cmnd, 0xff, 12);
31083 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31084
31085 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31086 gdth_hget_str *phg;
31087 char cmnd[MAX_COMMAND_SIZE];
31088
31089 + pax_track_stack();
31090 +
31091 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31092 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31093 if (!gdtcmd || !estr)
31094 diff -urNp linux-2.6.39.4/drivers/scsi/hosts.c linux-2.6.39.4/drivers/scsi/hosts.c
31095 --- linux-2.6.39.4/drivers/scsi/hosts.c 2011-05-19 00:06:34.000000000 -0400
31096 +++ linux-2.6.39.4/drivers/scsi/hosts.c 2011-08-05 19:44:37.000000000 -0400
31097 @@ -42,7 +42,7 @@
31098 #include "scsi_logging.h"
31099
31100
31101 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
31102 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31103
31104
31105 static void scsi_host_cls_release(struct device *dev)
31106 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31107 * subtract one because we increment first then return, but we need to
31108 * know what the next host number was before increment
31109 */
31110 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31111 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31112 shost->dma_channel = 0xff;
31113
31114 /* These three are default values which can be overridden */
31115 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.c linux-2.6.39.4/drivers/scsi/hpsa.c
31116 --- linux-2.6.39.4/drivers/scsi/hpsa.c 2011-05-19 00:06:34.000000000 -0400
31117 +++ linux-2.6.39.4/drivers/scsi/hpsa.c 2011-08-05 20:34:06.000000000 -0400
31118 @@ -469,7 +469,7 @@ static inline u32 next_command(struct ct
31119 u32 a;
31120
31121 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31122 - return h->access.command_completed(h);
31123 + return h->access->command_completed(h);
31124
31125 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31126 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31127 @@ -2889,7 +2889,7 @@ static void start_io(struct ctlr_info *h
31128 while (!list_empty(&h->reqQ)) {
31129 c = list_entry(h->reqQ.next, struct CommandList, list);
31130 /* can't do anything if fifo is full */
31131 - if ((h->access.fifo_full(h))) {
31132 + if ((h->access->fifo_full(h))) {
31133 dev_warn(&h->pdev->dev, "fifo full\n");
31134 break;
31135 }
31136 @@ -2899,7 +2899,7 @@ static void start_io(struct ctlr_info *h
31137 h->Qdepth--;
31138
31139 /* Tell the controller execute command */
31140 - h->access.submit_command(h, c);
31141 + h->access->submit_command(h, c);
31142
31143 /* Put job onto the completed Q */
31144 addQ(&h->cmpQ, c);
31145 @@ -2908,17 +2908,17 @@ static void start_io(struct ctlr_info *h
31146
31147 static inline unsigned long get_next_completion(struct ctlr_info *h)
31148 {
31149 - return h->access.command_completed(h);
31150 + return h->access->command_completed(h);
31151 }
31152
31153 static inline bool interrupt_pending(struct ctlr_info *h)
31154 {
31155 - return h->access.intr_pending(h);
31156 + return h->access->intr_pending(h);
31157 }
31158
31159 static inline long interrupt_not_for_us(struct ctlr_info *h)
31160 {
31161 - return (h->access.intr_pending(h) == 0) ||
31162 + return (h->access->intr_pending(h) == 0) ||
31163 (h->interrupts_enabled == 0);
31164 }
31165
31166 @@ -3684,7 +3684,7 @@ static int __devinit hpsa_pci_init(struc
31167 if (prod_index < 0)
31168 return -ENODEV;
31169 h->product_name = products[prod_index].product_name;
31170 - h->access = *(products[prod_index].access);
31171 + h->access = products[prod_index].access;
31172
31173 if (hpsa_board_disabled(h->pdev)) {
31174 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31175 @@ -3845,7 +3845,7 @@ static int __devinit hpsa_init_one(struc
31176 }
31177
31178 /* make sure the board interrupts are off */
31179 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31180 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31181
31182 if (h->msix_vector || h->msi_vector)
31183 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
31184 @@ -3892,7 +3892,7 @@ static int __devinit hpsa_init_one(struc
31185 hpsa_scsi_setup(h);
31186
31187 /* Turn the interrupts on so we can service requests */
31188 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31189 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31190
31191 hpsa_put_ctlr_into_performant_mode(h);
31192 hpsa_hba_inquiry(h);
31193 @@ -3955,7 +3955,7 @@ static void hpsa_shutdown(struct pci_dev
31194 * To write all data in the battery backed cache to disks
31195 */
31196 hpsa_flush_cache(h);
31197 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31198 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31199 free_irq(h->intr[h->intr_mode], h);
31200 #ifdef CONFIG_PCI_MSI
31201 if (h->msix_vector)
31202 @@ -4118,7 +4118,7 @@ static __devinit void hpsa_enter_perform
31203 return;
31204 }
31205 /* Change the access methods to the performant access methods */
31206 - h->access = SA5_performant_access;
31207 + h->access = &SA5_performant_access;
31208 h->transMethod = CFGTBL_Trans_Performant;
31209 }
31210
31211 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.h linux-2.6.39.4/drivers/scsi/hpsa.h
31212 --- linux-2.6.39.4/drivers/scsi/hpsa.h 2011-05-19 00:06:34.000000000 -0400
31213 +++ linux-2.6.39.4/drivers/scsi/hpsa.h 2011-08-05 20:34:06.000000000 -0400
31214 @@ -73,7 +73,7 @@ struct ctlr_info {
31215 unsigned int msix_vector;
31216 unsigned int msi_vector;
31217 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31218 - struct access_method access;
31219 + struct access_method *access;
31220
31221 /* queue and queue Info */
31222 struct list_head reqQ;
31223 diff -urNp linux-2.6.39.4/drivers/scsi/ips.h linux-2.6.39.4/drivers/scsi/ips.h
31224 --- linux-2.6.39.4/drivers/scsi/ips.h 2011-05-19 00:06:34.000000000 -0400
31225 +++ linux-2.6.39.4/drivers/scsi/ips.h 2011-08-05 20:34:06.000000000 -0400
31226 @@ -1027,7 +1027,7 @@ typedef struct {
31227 int (*intr)(struct ips_ha *);
31228 void (*enableint)(struct ips_ha *);
31229 uint32_t (*statupd)(struct ips_ha *);
31230 -} ips_hw_func_t;
31231 +} __no_const ips_hw_func_t;
31232
31233 typedef struct ips_ha {
31234 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31235 diff -urNp linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c
31236 --- linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-05-19 00:06:34.000000000 -0400
31237 +++ linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-08-05 19:44:37.000000000 -0400
31238 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31239 * all together if not used XXX
31240 */
31241 struct {
31242 - atomic_t no_free_exch;
31243 - atomic_t no_free_exch_xid;
31244 - atomic_t xid_not_found;
31245 - atomic_t xid_busy;
31246 - atomic_t seq_not_found;
31247 - atomic_t non_bls_resp;
31248 + atomic_unchecked_t no_free_exch;
31249 + atomic_unchecked_t no_free_exch_xid;
31250 + atomic_unchecked_t xid_not_found;
31251 + atomic_unchecked_t xid_busy;
31252 + atomic_unchecked_t seq_not_found;
31253 + atomic_unchecked_t non_bls_resp;
31254 } stats;
31255 };
31256
31257 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31258 /* allocate memory for exchange */
31259 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31260 if (!ep) {
31261 - atomic_inc(&mp->stats.no_free_exch);
31262 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31263 goto out;
31264 }
31265 memset(ep, 0, sizeof(*ep));
31266 @@ -761,7 +761,7 @@ out:
31267 return ep;
31268 err:
31269 spin_unlock_bh(&pool->lock);
31270 - atomic_inc(&mp->stats.no_free_exch_xid);
31271 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31272 mempool_free(ep, mp->ep_pool);
31273 return NULL;
31274 }
31275 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31276 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31277 ep = fc_exch_find(mp, xid);
31278 if (!ep) {
31279 - atomic_inc(&mp->stats.xid_not_found);
31280 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31281 reject = FC_RJT_OX_ID;
31282 goto out;
31283 }
31284 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31285 ep = fc_exch_find(mp, xid);
31286 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31287 if (ep) {
31288 - atomic_inc(&mp->stats.xid_busy);
31289 + atomic_inc_unchecked(&mp->stats.xid_busy);
31290 reject = FC_RJT_RX_ID;
31291 goto rel;
31292 }
31293 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31294 }
31295 xid = ep->xid; /* get our XID */
31296 } else if (!ep) {
31297 - atomic_inc(&mp->stats.xid_not_found);
31298 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31299 reject = FC_RJT_RX_ID; /* XID not found */
31300 goto out;
31301 }
31302 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31303 } else {
31304 sp = &ep->seq;
31305 if (sp->id != fh->fh_seq_id) {
31306 - atomic_inc(&mp->stats.seq_not_found);
31307 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31308 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31309 goto rel;
31310 }
31311 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31312
31313 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31314 if (!ep) {
31315 - atomic_inc(&mp->stats.xid_not_found);
31316 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31317 goto out;
31318 }
31319 if (ep->esb_stat & ESB_ST_COMPLETE) {
31320 - atomic_inc(&mp->stats.xid_not_found);
31321 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31322 goto rel;
31323 }
31324 if (ep->rxid == FC_XID_UNKNOWN)
31325 ep->rxid = ntohs(fh->fh_rx_id);
31326 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31327 - atomic_inc(&mp->stats.xid_not_found);
31328 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31329 goto rel;
31330 }
31331 if (ep->did != ntoh24(fh->fh_s_id) &&
31332 ep->did != FC_FID_FLOGI) {
31333 - atomic_inc(&mp->stats.xid_not_found);
31334 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31335 goto rel;
31336 }
31337 sof = fr_sof(fp);
31338 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31339 sp->ssb_stat |= SSB_ST_RESP;
31340 sp->id = fh->fh_seq_id;
31341 } else if (sp->id != fh->fh_seq_id) {
31342 - atomic_inc(&mp->stats.seq_not_found);
31343 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31344 goto rel;
31345 }
31346
31347 @@ -1479,9 +1479,9 @@ static void fc_exch_recv_resp(struct fc_
31348 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31349
31350 if (!sp)
31351 - atomic_inc(&mp->stats.xid_not_found);
31352 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31353 else
31354 - atomic_inc(&mp->stats.non_bls_resp);
31355 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31356
31357 fc_frame_free(fp);
31358 }
31359 diff -urNp linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c
31360 --- linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-05-19 00:06:34.000000000 -0400
31361 +++ linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-08-05 20:34:06.000000000 -0400
31362 @@ -314,7 +314,7 @@ static struct ata_port_operations sas_sa
31363 .postreset = ata_std_postreset,
31364 .error_handler = ata_std_error_handler,
31365 .post_internal_cmd = sas_ata_post_internal,
31366 - .qc_defer = ata_std_qc_defer,
31367 + .qc_defer = ata_std_qc_defer,
31368 .qc_prep = ata_noop_qc_prep,
31369 .qc_issue = sas_ata_qc_issue,
31370 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31371 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c
31372 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-19 00:06:34.000000000 -0400
31373 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-05 19:44:37.000000000 -0400
31374 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31375
31376 #include <linux/debugfs.h>
31377
31378 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31379 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31380 static unsigned long lpfc_debugfs_start_time = 0L;
31381
31382 /* iDiag */
31383 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31384 lpfc_debugfs_enable = 0;
31385
31386 len = 0;
31387 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31388 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31389 (lpfc_debugfs_max_disc_trc - 1);
31390 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31391 dtp = vport->disc_trc + i;
31392 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31393 lpfc_debugfs_enable = 0;
31394
31395 len = 0;
31396 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31397 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31398 (lpfc_debugfs_max_slow_ring_trc - 1);
31399 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31400 dtp = phba->slow_ring_trc + i;
31401 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31402 uint32_t *ptr;
31403 char buffer[1024];
31404
31405 + pax_track_stack();
31406 +
31407 off = 0;
31408 spin_lock_irq(&phba->hbalock);
31409
31410 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31411 !vport || !vport->disc_trc)
31412 return;
31413
31414 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31415 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31416 (lpfc_debugfs_max_disc_trc - 1);
31417 dtp = vport->disc_trc + index;
31418 dtp->fmt = fmt;
31419 dtp->data1 = data1;
31420 dtp->data2 = data2;
31421 dtp->data3 = data3;
31422 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31423 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31424 dtp->jif = jiffies;
31425 #endif
31426 return;
31427 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31428 !phba || !phba->slow_ring_trc)
31429 return;
31430
31431 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31432 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31433 (lpfc_debugfs_max_slow_ring_trc - 1);
31434 dtp = phba->slow_ring_trc + index;
31435 dtp->fmt = fmt;
31436 dtp->data1 = data1;
31437 dtp->data2 = data2;
31438 dtp->data3 = data3;
31439 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31440 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31441 dtp->jif = jiffies;
31442 #endif
31443 return;
31444 @@ -2145,7 +2147,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31445 "slow_ring buffer\n");
31446 goto debug_failed;
31447 }
31448 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31449 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31450 memset(phba->slow_ring_trc, 0,
31451 (sizeof(struct lpfc_debugfs_trc) *
31452 lpfc_debugfs_max_slow_ring_trc));
31453 @@ -2191,7 +2193,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31454 "buffer\n");
31455 goto debug_failed;
31456 }
31457 - atomic_set(&vport->disc_trc_cnt, 0);
31458 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31459
31460 snprintf(name, sizeof(name), "discovery_trace");
31461 vport->debug_disc_trc =
31462 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h
31463 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-05-19 00:06:34.000000000 -0400
31464 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-08-05 19:44:37.000000000 -0400
31465 @@ -419,7 +419,7 @@ struct lpfc_vport {
31466 struct dentry *debug_nodelist;
31467 struct dentry *vport_debugfs_root;
31468 struct lpfc_debugfs_trc *disc_trc;
31469 - atomic_t disc_trc_cnt;
31470 + atomic_unchecked_t disc_trc_cnt;
31471 #endif
31472 uint8_t stat_data_enabled;
31473 uint8_t stat_data_blocked;
31474 @@ -785,8 +785,8 @@ struct lpfc_hba {
31475 struct timer_list fabric_block_timer;
31476 unsigned long bit_flags;
31477 #define FABRIC_COMANDS_BLOCKED 0
31478 - atomic_t num_rsrc_err;
31479 - atomic_t num_cmd_success;
31480 + atomic_unchecked_t num_rsrc_err;
31481 + atomic_unchecked_t num_cmd_success;
31482 unsigned long last_rsrc_error_time;
31483 unsigned long last_ramp_down_time;
31484 unsigned long last_ramp_up_time;
31485 @@ -800,7 +800,7 @@ struct lpfc_hba {
31486 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31487 struct dentry *debug_slow_ring_trc;
31488 struct lpfc_debugfs_trc *slow_ring_trc;
31489 - atomic_t slow_ring_trc_cnt;
31490 + atomic_unchecked_t slow_ring_trc_cnt;
31491 /* iDiag debugfs sub-directory */
31492 struct dentry *idiag_root;
31493 struct dentry *idiag_pci_cfg;
31494 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c
31495 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-05-19 00:06:34.000000000 -0400
31496 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:34:06.000000000 -0400
31497 @@ -9535,8 +9535,10 @@ lpfc_init(void)
31498 printk(LPFC_COPYRIGHT "\n");
31499
31500 if (lpfc_enable_npiv) {
31501 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31502 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31503 + pax_open_kernel();
31504 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31505 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31506 + pax_close_kernel();
31507 }
31508 lpfc_transport_template =
31509 fc_attach_transport(&lpfc_transport_functions);
31510 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c
31511 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-19 00:06:34.000000000 -0400
31512 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-05 19:44:37.000000000 -0400
31513 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31514 uint32_t evt_posted;
31515
31516 spin_lock_irqsave(&phba->hbalock, flags);
31517 - atomic_inc(&phba->num_rsrc_err);
31518 + atomic_inc_unchecked(&phba->num_rsrc_err);
31519 phba->last_rsrc_error_time = jiffies;
31520
31521 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31522 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31523 unsigned long flags;
31524 struct lpfc_hba *phba = vport->phba;
31525 uint32_t evt_posted;
31526 - atomic_inc(&phba->num_cmd_success);
31527 + atomic_inc_unchecked(&phba->num_cmd_success);
31528
31529 if (vport->cfg_lun_queue_depth <= queue_depth)
31530 return;
31531 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31532 unsigned long num_rsrc_err, num_cmd_success;
31533 int i;
31534
31535 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31536 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31537 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31538 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31539
31540 vports = lpfc_create_vport_work_array(phba);
31541 if (vports != NULL)
31542 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31543 }
31544 }
31545 lpfc_destroy_vport_work_array(phba, vports);
31546 - atomic_set(&phba->num_rsrc_err, 0);
31547 - atomic_set(&phba->num_cmd_success, 0);
31548 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31549 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31550 }
31551
31552 /**
31553 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31554 }
31555 }
31556 lpfc_destroy_vport_work_array(phba, vports);
31557 - atomic_set(&phba->num_rsrc_err, 0);
31558 - atomic_set(&phba->num_cmd_success, 0);
31559 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31560 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31561 }
31562
31563 /**
31564 diff -urNp linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c
31565 --- linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-19 00:06:34.000000000 -0400
31566 +++ linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-05 19:44:37.000000000 -0400
31567 @@ -3510,6 +3510,8 @@ megaraid_cmm_register(adapter_t *adapter
31568 int rval;
31569 int i;
31570
31571 + pax_track_stack();
31572 +
31573 // Allocate memory for the base list of scb for management module.
31574 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31575
31576 diff -urNp linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c
31577 --- linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-05-19 00:06:34.000000000 -0400
31578 +++ linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-08-05 19:44:37.000000000 -0400
31579 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31580 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31581 int ret;
31582
31583 + pax_track_stack();
31584 +
31585 or = osd_start_request(od, GFP_KERNEL);
31586 if (!or)
31587 return -ENOMEM;
31588 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.c linux-2.6.39.4/drivers/scsi/pmcraid.c
31589 --- linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-05-19 00:06:34.000000000 -0400
31590 +++ linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-08-05 19:44:37.000000000 -0400
31591 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31592 res->scsi_dev = scsi_dev;
31593 scsi_dev->hostdata = res;
31594 res->change_detected = 0;
31595 - atomic_set(&res->read_failures, 0);
31596 - atomic_set(&res->write_failures, 0);
31597 + atomic_set_unchecked(&res->read_failures, 0);
31598 + atomic_set_unchecked(&res->write_failures, 0);
31599 rc = 0;
31600 }
31601 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31602 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31603
31604 /* If this was a SCSI read/write command keep count of errors */
31605 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31606 - atomic_inc(&res->read_failures);
31607 + atomic_inc_unchecked(&res->read_failures);
31608 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31609 - atomic_inc(&res->write_failures);
31610 + atomic_inc_unchecked(&res->write_failures);
31611
31612 if (!RES_IS_GSCSI(res->cfg_entry) &&
31613 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31614 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31615 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31616 * hrrq_id assigned here in queuecommand
31617 */
31618 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31619 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31620 pinstance->num_hrrq;
31621 cmd->cmd_done = pmcraid_io_done;
31622
31623 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31624 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31625 * hrrq_id assigned here in queuecommand
31626 */
31627 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31628 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31629 pinstance->num_hrrq;
31630
31631 if (request_size) {
31632 @@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
31633
31634 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31635 /* add resources only after host is added into system */
31636 - if (!atomic_read(&pinstance->expose_resources))
31637 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31638 return;
31639
31640 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31641 @@ -5329,8 +5329,8 @@ static int __devinit pmcraid_init_instan
31642 init_waitqueue_head(&pinstance->reset_wait_q);
31643
31644 atomic_set(&pinstance->outstanding_cmds, 0);
31645 - atomic_set(&pinstance->last_message_id, 0);
31646 - atomic_set(&pinstance->expose_resources, 0);
31647 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31648 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31649
31650 INIT_LIST_HEAD(&pinstance->free_res_q);
31651 INIT_LIST_HEAD(&pinstance->used_res_q);
31652 @@ -6045,7 +6045,7 @@ static int __devinit pmcraid_probe(
31653 /* Schedule worker thread to handle CCN and take care of adding and
31654 * removing devices to OS
31655 */
31656 - atomic_set(&pinstance->expose_resources, 1);
31657 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31658 schedule_work(&pinstance->worker_q);
31659 return rc;
31660
31661 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.h linux-2.6.39.4/drivers/scsi/pmcraid.h
31662 --- linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-05-19 00:06:34.000000000 -0400
31663 +++ linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-08-05 19:44:37.000000000 -0400
31664 @@ -750,7 +750,7 @@ struct pmcraid_instance {
31665 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31666
31667 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31668 - atomic_t last_message_id;
31669 + atomic_unchecked_t last_message_id;
31670
31671 /* configuration table */
31672 struct pmcraid_config_table *cfg_table;
31673 @@ -779,7 +779,7 @@ struct pmcraid_instance {
31674 atomic_t outstanding_cmds;
31675
31676 /* should add/delete resources to mid-layer now ?*/
31677 - atomic_t expose_resources;
31678 + atomic_unchecked_t expose_resources;
31679
31680
31681
31682 @@ -815,8 +815,8 @@ struct pmcraid_resource_entry {
31683 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31684 };
31685 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31686 - atomic_t read_failures; /* count of failed READ commands */
31687 - atomic_t write_failures; /* count of failed WRITE commands */
31688 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31689 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31690
31691 /* To indicate add/delete/modify during CCN */
31692 u8 change_detected;
31693 diff -urNp linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h
31694 --- linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-05-19 00:06:34.000000000 -0400
31695 +++ linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:34:06.000000000 -0400
31696 @@ -2236,7 +2236,7 @@ struct isp_operations {
31697 int (*get_flash_version) (struct scsi_qla_host *, void *);
31698 int (*start_scsi) (srb_t *);
31699 int (*abort_isp) (struct scsi_qla_host *);
31700 -};
31701 +} __no_const;
31702
31703 /* MSI-X Support *************************************************************/
31704
31705 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h
31706 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-05-19 00:06:34.000000000 -0400
31707 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-05 19:44:37.000000000 -0400
31708 @@ -256,7 +256,7 @@ struct ddb_entry {
31709 atomic_t retry_relogin_timer; /* Min Time between relogins
31710 * (4000 only) */
31711 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31712 - atomic_t relogin_retry_count; /* Num of times relogin has been
31713 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31714 * retried */
31715
31716 uint16_t port;
31717 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c
31718 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-05-19 00:06:34.000000000 -0400
31719 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-05 19:44:37.000000000 -0400
31720 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31721 ddb_entry->fw_ddb_index = fw_ddb_index;
31722 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31723 atomic_set(&ddb_entry->relogin_timer, 0);
31724 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31725 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31726 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31727 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31728 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31729 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31730 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31731 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31732 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31733 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31734 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31735 atomic_set(&ddb_entry->relogin_timer, 0);
31736 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31737 iscsi_unblock_session(ddb_entry->sess);
31738 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c
31739 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-05-19 00:06:34.000000000 -0400
31740 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-05 19:44:37.000000000 -0400
31741 @@ -802,13 +802,13 @@ static void qla4xxx_timer(struct scsi_ql
31742 ddb_entry->fw_ddb_device_state ==
31743 DDB_DS_SESSION_FAILED) {
31744 /* Reset retry relogin timer */
31745 - atomic_inc(&ddb_entry->relogin_retry_count);
31746 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31747 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31748 " timed out-retrying"
31749 " relogin (%d)\n",
31750 ha->host_no,
31751 ddb_entry->fw_ddb_index,
31752 - atomic_read(&ddb_entry->
31753 + atomic_read_unchecked(&ddb_entry->
31754 relogin_retry_count))
31755 );
31756 start_dpc++;
31757 diff -urNp linux-2.6.39.4/drivers/scsi/scsi.c linux-2.6.39.4/drivers/scsi/scsi.c
31758 --- linux-2.6.39.4/drivers/scsi/scsi.c 2011-05-19 00:06:34.000000000 -0400
31759 +++ linux-2.6.39.4/drivers/scsi/scsi.c 2011-08-05 19:44:37.000000000 -0400
31760 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31761 unsigned long timeout;
31762 int rtn = 0;
31763
31764 - atomic_inc(&cmd->device->iorequest_cnt);
31765 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31766
31767 /* check if the device is still usable */
31768 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31769 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_debug.c linux-2.6.39.4/drivers/scsi/scsi_debug.c
31770 --- linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-05-19 00:06:34.000000000 -0400
31771 +++ linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-08-05 19:44:37.000000000 -0400
31772 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31773 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31774 unsigned char *cmd = (unsigned char *)scp->cmnd;
31775
31776 + pax_track_stack();
31777 +
31778 if ((errsts = check_readiness(scp, 1, devip)))
31779 return errsts;
31780 memset(arr, 0, sizeof(arr));
31781 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31782 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31783 unsigned char *cmd = (unsigned char *)scp->cmnd;
31784
31785 + pax_track_stack();
31786 +
31787 if ((errsts = check_readiness(scp, 1, devip)))
31788 return errsts;
31789 memset(arr, 0, sizeof(arr));
31790 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_lib.c linux-2.6.39.4/drivers/scsi/scsi_lib.c
31791 --- linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-05-19 00:06:34.000000000 -0400
31792 +++ linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-08-05 19:44:37.000000000 -0400
31793 @@ -1410,7 +1410,7 @@ static void scsi_kill_request(struct req
31794 shost = sdev->host;
31795 scsi_init_cmd_errh(cmd);
31796 cmd->result = DID_NO_CONNECT << 16;
31797 - atomic_inc(&cmd->device->iorequest_cnt);
31798 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31799
31800 /*
31801 * SCSI request completion path will do scsi_device_unbusy(),
31802 @@ -1436,9 +1436,9 @@ static void scsi_softirq_done(struct req
31803
31804 INIT_LIST_HEAD(&cmd->eh_entry);
31805
31806 - atomic_inc(&cmd->device->iodone_cnt);
31807 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31808 if (cmd->result)
31809 - atomic_inc(&cmd->device->ioerr_cnt);
31810 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31811
31812 disposition = scsi_decide_disposition(cmd);
31813 if (disposition != SUCCESS &&
31814 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_sysfs.c linux-2.6.39.4/drivers/scsi/scsi_sysfs.c
31815 --- linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:22.000000000 -0400
31816 +++ linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-08-05 19:44:37.000000000 -0400
31817 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31818 char *buf) \
31819 { \
31820 struct scsi_device *sdev = to_scsi_device(dev); \
31821 - unsigned long long count = atomic_read(&sdev->field); \
31822 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31823 return snprintf(buf, 20, "0x%llx\n", count); \
31824 } \
31825 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31826 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c
31827 --- linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-05-19 00:06:34.000000000 -0400
31828 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-08-05 19:44:37.000000000 -0400
31829 @@ -485,7 +485,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31830 * Netlink Infrastructure
31831 */
31832
31833 -static atomic_t fc_event_seq;
31834 +static atomic_unchecked_t fc_event_seq;
31835
31836 /**
31837 * fc_get_event_number - Obtain the next sequential FC event number
31838 @@ -498,7 +498,7 @@ static atomic_t fc_event_seq;
31839 u32
31840 fc_get_event_number(void)
31841 {
31842 - return atomic_add_return(1, &fc_event_seq);
31843 + return atomic_add_return_unchecked(1, &fc_event_seq);
31844 }
31845 EXPORT_SYMBOL(fc_get_event_number);
31846
31847 @@ -646,7 +646,7 @@ static __init int fc_transport_init(void
31848 {
31849 int error;
31850
31851 - atomic_set(&fc_event_seq, 0);
31852 + atomic_set_unchecked(&fc_event_seq, 0);
31853
31854 error = transport_class_register(&fc_host_class);
31855 if (error)
31856 @@ -836,7 +836,7 @@ static int fc_str_to_dev_loss(const char
31857 char *cp;
31858
31859 *val = simple_strtoul(buf, &cp, 0);
31860 - if ((*cp && (*cp != '\n')) || (*val < 0))
31861 + if (*cp && (*cp != '\n'))
31862 return -EINVAL;
31863 /*
31864 * Check for overflow; dev_loss_tmo is u32
31865 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c
31866 --- linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-05-19 00:06:34.000000000 -0400
31867 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-05 19:44:37.000000000 -0400
31868 @@ -83,7 +83,7 @@ struct iscsi_internal {
31869 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31870 };
31871
31872 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31873 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31874 static struct workqueue_struct *iscsi_eh_timer_workq;
31875
31876 /*
31877 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31878 int err;
31879
31880 ihost = shost->shost_data;
31881 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31882 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31883
31884 if (id == ISCSI_MAX_TARGET) {
31885 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31886 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31887 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31888 ISCSI_TRANSPORT_VERSION);
31889
31890 - atomic_set(&iscsi_session_nr, 0);
31891 + atomic_set_unchecked(&iscsi_session_nr, 0);
31892
31893 err = class_register(&iscsi_transport_class);
31894 if (err)
31895 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c
31896 --- linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-05-19 00:06:34.000000000 -0400
31897 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-08-05 19:44:37.000000000 -0400
31898 @@ -33,7 +33,7 @@
31899 #include "scsi_transport_srp_internal.h"
31900
31901 struct srp_host_attrs {
31902 - atomic_t next_port_id;
31903 + atomic_unchecked_t next_port_id;
31904 };
31905 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31906
31907 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31908 struct Scsi_Host *shost = dev_to_shost(dev);
31909 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31910
31911 - atomic_set(&srp_host->next_port_id, 0);
31912 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31913 return 0;
31914 }
31915
31916 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31917 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31918 rport->roles = ids->roles;
31919
31920 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31921 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31922 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31923
31924 transport_setup_device(&rport->dev);
31925 diff -urNp linux-2.6.39.4/drivers/scsi/sg.c linux-2.6.39.4/drivers/scsi/sg.c
31926 --- linux-2.6.39.4/drivers/scsi/sg.c 2011-05-19 00:06:34.000000000 -0400
31927 +++ linux-2.6.39.4/drivers/scsi/sg.c 2011-08-05 19:44:37.000000000 -0400
31928 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31929 const struct file_operations * fops;
31930 };
31931
31932 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31933 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31934 {"allow_dio", &adio_fops},
31935 {"debug", &debug_fops},
31936 {"def_reserved_size", &dressz_fops},
31937 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31938 {
31939 int k, mask;
31940 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31941 - struct sg_proc_leaf * leaf;
31942 + const struct sg_proc_leaf * leaf;
31943
31944 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31945 if (!sg_proc_sgp)
31946 diff -urNp linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31947 --- linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-19 00:06:34.000000000 -0400
31948 +++ linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-05 19:44:37.000000000 -0400
31949 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31950 int do_iounmap = 0;
31951 int do_disable_device = 1;
31952
31953 + pax_track_stack();
31954 +
31955 memset(&sym_dev, 0, sizeof(sym_dev));
31956 memset(&nvram, 0, sizeof(nvram));
31957 sym_dev.pdev = pdev;
31958 diff -urNp linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c
31959 --- linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-05-19 00:06:34.000000000 -0400
31960 +++ linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-08-05 19:44:37.000000000 -0400
31961 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
31962 dma_addr_t base;
31963 unsigned i;
31964
31965 + pax_track_stack();
31966 +
31967 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
31968 cmd.reqRingNumPages = adapter->req_pages;
31969 cmd.cmpRingNumPages = adapter->cmp_pages;
31970 diff -urNp linux-2.6.39.4/drivers/spi/spi.c linux-2.6.39.4/drivers/spi/spi.c
31971 --- linux-2.6.39.4/drivers/spi/spi.c 2011-05-19 00:06:34.000000000 -0400
31972 +++ linux-2.6.39.4/drivers/spi/spi.c 2011-08-05 19:44:37.000000000 -0400
31973 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
31974 EXPORT_SYMBOL_GPL(spi_bus_unlock);
31975
31976 /* portable code must never pass more than 32 bytes */
31977 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
31978 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
31979
31980 static u8 *buf;
31981
31982 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
31983 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-19 00:06:34.000000000 -0400
31984 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-05 20:34:06.000000000 -0400
31985 @@ -857,14 +857,14 @@ static void dhd_op_if(dhd_if_t *ifp)
31986 free_netdev(ifp->net);
31987 }
31988 /* Allocate etherdev, including space for private structure */
31989 - ifp->net = alloc_etherdev(sizeof(dhd));
31990 + ifp->net = alloc_etherdev(sizeof(*dhd));
31991 if (!ifp->net) {
31992 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
31993 ret = -ENOMEM;
31994 }
31995 if (ret == 0) {
31996 strcpy(ifp->net->name, ifp->name);
31997 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
31998 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
31999 err = dhd_net_attach(&dhd->pub, ifp->idx);
32000 if (err != 0) {
32001 DHD_ERROR(("%s: dhd_net_attach failed, "
32002 @@ -1923,7 +1923,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32003 strcpy(nv_path, nvram_path);
32004
32005 /* Allocate etherdev, including space for private structure */
32006 - net = alloc_etherdev(sizeof(dhd));
32007 + net = alloc_etherdev(sizeof(*dhd));
32008 if (!net) {
32009 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32010 goto fail;
32011 @@ -1939,7 +1939,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32012 /*
32013 * Save the dhd_info into the priv
32014 */
32015 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32016 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32017
32018 /* Set network interface name if it was provided as module parameter */
32019 if (iface_name[0]) {
32020 @@ -2056,7 +2056,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32021 /*
32022 * Save the dhd_info into the priv
32023 */
32024 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32025 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32026
32027 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32028 g_bus = bus;
32029 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c
32030 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-19 00:06:34.000000000 -0400
32031 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-08-05 19:44:37.000000000 -0400
32032 @@ -495,7 +495,7 @@ wl_iw_get_range(struct net_device *dev,
32033 list = (wl_u32_list_t *) channels;
32034
32035 dwrq->length = sizeof(struct iw_range);
32036 - memset(range, 0, sizeof(range));
32037 + memset(range, 0, sizeof(*range));
32038
32039 range->min_nwid = range->max_nwid = 0;
32040
32041 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c
32042 --- linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-05-19 00:06:34.000000000 -0400
32043 +++ linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-08-05 19:44:37.000000000 -0400
32044 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32045 struct net_device_stats *stats = &etdev->net_stats;
32046
32047 if (tcb->flags & fMP_DEST_BROAD)
32048 - atomic_inc(&etdev->Stats.brdcstxmt);
32049 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32050 else if (tcb->flags & fMP_DEST_MULTI)
32051 - atomic_inc(&etdev->Stats.multixmt);
32052 + atomic_inc_unchecked(&etdev->Stats.multixmt);
32053 else
32054 - atomic_inc(&etdev->Stats.unixmt);
32055 + atomic_inc_unchecked(&etdev->Stats.unixmt);
32056
32057 if (tcb->skb) {
32058 stats->tx_bytes += tcb->skb->len;
32059 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h
32060 --- linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-05-19 00:06:34.000000000 -0400
32061 +++ linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-08-05 19:44:37.000000000 -0400
32062 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32063 * operations
32064 */
32065 u32 unircv; /* # multicast packets received */
32066 - atomic_t unixmt; /* # multicast packets for Tx */
32067 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32068 u32 multircv; /* # multicast packets received */
32069 - atomic_t multixmt; /* # multicast packets for Tx */
32070 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32071 u32 brdcstrcv; /* # broadcast packets received */
32072 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
32073 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32074 u32 norcvbuf; /* # Rx packets discarded */
32075 u32 noxmtbuf; /* # Tx packets discarded */
32076
32077 diff -urNp linux-2.6.39.4/drivers/staging/hv/channel.c linux-2.6.39.4/drivers/staging/hv/channel.c
32078 --- linux-2.6.39.4/drivers/staging/hv/channel.c 2011-05-19 00:06:34.000000000 -0400
32079 +++ linux-2.6.39.4/drivers/staging/hv/channel.c 2011-08-05 19:44:37.000000000 -0400
32080 @@ -509,8 +509,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32081 unsigned long flags;
32082 int ret = 0;
32083
32084 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32085 - atomic_inc(&vmbus_connection.next_gpadl_handle);
32086 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32087 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32088
32089 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32090 if (ret)
32091 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv.c linux-2.6.39.4/drivers/staging/hv/hv.c
32092 --- linux-2.6.39.4/drivers/staging/hv/hv.c 2011-05-19 00:06:34.000000000 -0400
32093 +++ linux-2.6.39.4/drivers/staging/hv/hv.c 2011-08-05 19:44:37.000000000 -0400
32094 @@ -163,7 +163,7 @@ static u64 do_hypercall(u64 control, voi
32095 u64 output_address = (output) ? virt_to_phys(output) : 0;
32096 u32 output_address_hi = output_address >> 32;
32097 u32 output_address_lo = output_address & 0xFFFFFFFF;
32098 - volatile void *hypercall_page = hv_context.hypercall_page;
32099 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32100
32101 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
32102 control, input, output);
32103 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv_mouse.c linux-2.6.39.4/drivers/staging/hv/hv_mouse.c
32104 --- linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-05-19 00:06:34.000000000 -0400
32105 +++ linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-08-13 20:26:10.000000000 -0400
32106 @@ -898,8 +898,10 @@ static void reportdesc_callback(struct h
32107 if (hid_dev) {
32108 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32109
32110 - hid_dev->ll_driver->open = mousevsc_hid_open;
32111 - hid_dev->ll_driver->close = mousevsc_hid_close;
32112 + pax_open_kernel();
32113 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32114 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32115 + pax_close_kernel();
32116
32117 hid_dev->bus = BUS_VIRTUAL;
32118 hid_dev->vendor = input_device_ctx->device_info.vendor;
32119 diff -urNp linux-2.6.39.4/drivers/staging/hv/rndis_filter.c linux-2.6.39.4/drivers/staging/hv/rndis_filter.c
32120 --- linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-05-19 00:06:34.000000000 -0400
32121 +++ linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-08-05 19:44:37.000000000 -0400
32122 @@ -49,7 +49,7 @@ struct rndis_device {
32123
32124 enum rndis_device_state state;
32125 u32 link_stat;
32126 - atomic_t new_req_id;
32127 + atomic_unchecked_t new_req_id;
32128
32129 spinlock_t request_lock;
32130 struct list_head req_list;
32131 @@ -144,7 +144,7 @@ static struct rndis_request *get_rndis_r
32132 * template
32133 */
32134 set = &rndis_msg->msg.set_req;
32135 - set->req_id = atomic_inc_return(&dev->new_req_id);
32136 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32137
32138 /* Add to the request list */
32139 spin_lock_irqsave(&dev->request_lock, flags);
32140 @@ -709,7 +709,7 @@ static void rndis_filter_halt_device(str
32141
32142 /* Setup the rndis set */
32143 halt = &request->request_msg.msg.halt_req;
32144 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32145 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32146
32147 /* Ignore return since this msg is optional. */
32148 rndis_filter_send_request(dev, request);
32149 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c
32150 --- linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-05-19 00:06:34.000000000 -0400
32151 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-08-05 19:44:37.000000000 -0400
32152 @@ -661,14 +661,14 @@ int vmbus_child_device_register(struct h
32153 {
32154 int ret = 0;
32155
32156 - static atomic_t device_num = ATOMIC_INIT(0);
32157 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32158
32159 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
32160 child_device_obj);
32161
32162 /* Set the device name. Otherwise, device_register() will fail. */
32163 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32164 - atomic_inc_return(&device_num));
32165 + atomic_inc_return_unchecked(&device_num));
32166
32167 /* The new device belongs to this bus */
32168 child_device_obj->device.bus = &vmbus_drv.bus; /* device->dev.bus; */
32169 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_private.h linux-2.6.39.4/drivers/staging/hv/vmbus_private.h
32170 --- linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-05-19 00:06:34.000000000 -0400
32171 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-08-05 19:44:37.000000000 -0400
32172 @@ -58,7 +58,7 @@ enum vmbus_connect_state {
32173 struct vmbus_connection {
32174 enum vmbus_connect_state conn_state;
32175
32176 - atomic_t next_gpadl_handle;
32177 + atomic_unchecked_t next_gpadl_handle;
32178
32179 /*
32180 * Represents channel interrupts. Each bit position represents a
32181 diff -urNp linux-2.6.39.4/drivers/staging/iio/ring_generic.h linux-2.6.39.4/drivers/staging/iio/ring_generic.h
32182 --- linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-05-19 00:06:34.000000000 -0400
32183 +++ linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-08-13 20:14:25.000000000 -0400
32184 @@ -86,7 +86,7 @@ struct iio_ring_access_funcs {
32185
32186 int (*is_enabled)(struct iio_ring_buffer *ring);
32187 int (*enable)(struct iio_ring_buffer *ring);
32188 -};
32189 +} __no_const;
32190
32191 /**
32192 * struct iio_ring_buffer - general ring buffer structure
32193 @@ -134,7 +134,7 @@ struct iio_ring_buffer {
32194 struct iio_handler access_handler;
32195 struct iio_event_interface ev_int;
32196 struct iio_shared_ev_pointer shared_ev_pointer;
32197 - struct iio_ring_access_funcs access;
32198 + struct iio_ring_access_funcs access;
32199 int (*preenable)(struct iio_dev *);
32200 int (*postenable)(struct iio_dev *);
32201 int (*predisable)(struct iio_dev *);
32202 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet.c linux-2.6.39.4/drivers/staging/octeon/ethernet.c
32203 --- linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-05-19 00:06:34.000000000 -0400
32204 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-08-05 19:44:37.000000000 -0400
32205 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32206 * since the RX tasklet also increments it.
32207 */
32208 #ifdef CONFIG_64BIT
32209 - atomic64_add(rx_status.dropped_packets,
32210 - (atomic64_t *)&priv->stats.rx_dropped);
32211 + atomic64_add_unchecked(rx_status.dropped_packets,
32212 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32213 #else
32214 - atomic_add(rx_status.dropped_packets,
32215 - (atomic_t *)&priv->stats.rx_dropped);
32216 + atomic_add_unchecked(rx_status.dropped_packets,
32217 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32218 #endif
32219 }
32220
32221 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c
32222 --- linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-05-19 00:06:34.000000000 -0400
32223 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-08-05 19:44:37.000000000 -0400
32224 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32225 /* Increment RX stats for virtual ports */
32226 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32227 #ifdef CONFIG_64BIT
32228 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32229 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32230 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32231 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32232 #else
32233 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32234 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32235 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32236 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32237 #endif
32238 }
32239 netif_receive_skb(skb);
32240 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32241 dev->name);
32242 */
32243 #ifdef CONFIG_64BIT
32244 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32245 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32246 #else
32247 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32248 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32249 #endif
32250 dev_kfree_skb_irq(skb);
32251 }
32252 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/inode.c linux-2.6.39.4/drivers/staging/pohmelfs/inode.c
32253 --- linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-05-19 00:06:34.000000000 -0400
32254 +++ linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-08-05 19:44:37.000000000 -0400
32255 @@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su
32256 mutex_init(&psb->mcache_lock);
32257 psb->mcache_root = RB_ROOT;
32258 psb->mcache_timeout = msecs_to_jiffies(5000);
32259 - atomic_long_set(&psb->mcache_gen, 0);
32260 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32261
32262 psb->trans_max_pages = 100;
32263
32264 @@ -1870,7 +1870,7 @@ static int pohmelfs_fill_super(struct su
32265 INIT_LIST_HEAD(&psb->crypto_ready_list);
32266 INIT_LIST_HEAD(&psb->crypto_active_list);
32267
32268 - atomic_set(&psb->trans_gen, 1);
32269 + atomic_set_unchecked(&psb->trans_gen, 1);
32270 atomic_long_set(&psb->total_inodes, 0);
32271
32272 mutex_init(&psb->state_lock);
32273 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c
32274 --- linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-05-19 00:06:34.000000000 -0400
32275 +++ linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-08-05 19:44:37.000000000 -0400
32276 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32277 m->data = data;
32278 m->start = start;
32279 m->size = size;
32280 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32281 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32282
32283 mutex_lock(&psb->mcache_lock);
32284 err = pohmelfs_mcache_insert(psb, m);
32285 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h
32286 --- linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-05-19 00:06:34.000000000 -0400
32287 +++ linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-08-05 19:44:37.000000000 -0400
32288 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32289 struct pohmelfs_sb {
32290 struct rb_root mcache_root;
32291 struct mutex mcache_lock;
32292 - atomic_long_t mcache_gen;
32293 + atomic_long_unchecked_t mcache_gen;
32294 unsigned long mcache_timeout;
32295
32296 unsigned int idx;
32297
32298 unsigned int trans_retries;
32299
32300 - atomic_t trans_gen;
32301 + atomic_unchecked_t trans_gen;
32302
32303 unsigned int crypto_attached_size;
32304 unsigned int crypto_align_size;
32305 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/trans.c linux-2.6.39.4/drivers/staging/pohmelfs/trans.c
32306 --- linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-05-19 00:06:34.000000000 -0400
32307 +++ linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-08-05 19:44:37.000000000 -0400
32308 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32309 int err;
32310 struct netfs_cmd *cmd = t->iovec.iov_base;
32311
32312 - t->gen = atomic_inc_return(&psb->trans_gen);
32313 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32314
32315 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32316 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32317 diff -urNp linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h
32318 --- linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-05-19 00:06:34.000000000 -0400
32319 +++ linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-13 20:31:57.000000000 -0400
32320 @@ -83,7 +83,7 @@ struct _io_ops {
32321 u8 *pmem);
32322 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32323 u8 *pmem);
32324 -};
32325 +} __no_const;
32326
32327 struct io_req {
32328 struct list_head list;
32329 diff -urNp linux-2.6.39.4/drivers/staging/tty/istallion.c linux-2.6.39.4/drivers/staging/tty/istallion.c
32330 --- linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-05-19 00:06:34.000000000 -0400
32331 +++ linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-08-05 19:44:37.000000000 -0400
32332 @@ -186,7 +186,6 @@ static struct ktermios stli_deftermios
32333 * re-used for each stats call.
32334 */
32335 static comstats_t stli_comstats;
32336 -static combrd_t stli_brdstats;
32337 static struct asystats stli_cdkstats;
32338
32339 /*****************************************************************************/
32340 @@ -4003,6 +4002,7 @@ out:
32341
32342 static int stli_getbrdstats(combrd_t __user *bp)
32343 {
32344 + combrd_t stli_brdstats;
32345 struct stlibrd *brdp;
32346 unsigned int i;
32347
32348 @@ -4226,6 +4226,8 @@ static int stli_getportstruct(struct stl
32349 struct stliport stli_dummyport;
32350 struct stliport *portp;
32351
32352 + pax_track_stack();
32353 +
32354 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32355 return -EFAULT;
32356 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32357 @@ -4248,6 +4250,8 @@ static int stli_getbrdstruct(struct stli
32358 struct stlibrd stli_dummybrd;
32359 struct stlibrd *brdp;
32360
32361 + pax_track_stack();
32362 +
32363 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32364 return -EFAULT;
32365 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32366 diff -urNp linux-2.6.39.4/drivers/staging/tty/stallion.c linux-2.6.39.4/drivers/staging/tty/stallion.c
32367 --- linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-05-19 00:06:34.000000000 -0400
32368 +++ linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-08-05 19:44:37.000000000 -0400
32369 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32370 struct stlport stl_dummyport;
32371 struct stlport *portp;
32372
32373 + pax_track_stack();
32374 +
32375 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32376 return -EFAULT;
32377 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32378 diff -urNp linux-2.6.39.4/drivers/staging/usbip/stub_dev.c linux-2.6.39.4/drivers/staging/usbip/stub_dev.c
32379 --- linux-2.6.39.4/drivers/staging/usbip/stub_dev.c 2011-05-19 00:06:34.000000000 -0400
32380 +++ linux-2.6.39.4/drivers/staging/usbip/stub_dev.c 2011-08-13 20:32:52.000000000 -0400
32381 @@ -357,9 +357,11 @@ static struct stub_device *stub_device_a
32382
32383 init_waitqueue_head(&sdev->tx_waitq);
32384
32385 - sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
32386 - sdev->ud.eh_ops.reset = stub_device_reset;
32387 - sdev->ud.eh_ops.unusable = stub_device_unusable;
32388 + pax_open_kernel();
32389 + *(void **)&sdev->ud.eh_ops.shutdown = stub_shutdown_connection;
32390 + *(void **)&sdev->ud.eh_ops.reset = stub_device_reset;
32391 + *(void **)&sdev->ud.eh_ops.unusable = stub_device_unusable;
32392 + pax_close_kernel();
32393
32394 usbip_start_eh(&sdev->ud);
32395
32396 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci.h linux-2.6.39.4/drivers/staging/usbip/vhci.h
32397 --- linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-05-19 00:06:34.000000000 -0400
32398 +++ linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-08-05 19:44:37.000000000 -0400
32399 @@ -92,7 +92,7 @@ struct vhci_hcd {
32400 unsigned resuming:1;
32401 unsigned long re_timeout;
32402
32403 - atomic_t seqnum;
32404 + atomic_unchecked_t seqnum;
32405
32406 /*
32407 * NOTE:
32408 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c
32409 --- linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-05-19 00:06:34.000000000 -0400
32410 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-08-13 20:33:49.000000000 -0400
32411 @@ -536,7 +536,7 @@ static void vhci_tx_urb(struct urb *urb)
32412 return;
32413 }
32414
32415 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32416 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32417 if (priv->seqnum == 0xffff)
32418 usbip_uinfo("seqnum max\n");
32419
32420 @@ -795,7 +795,7 @@ static int vhci_urb_dequeue(struct usb_h
32421 return -ENOMEM;
32422 }
32423
32424 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32425 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32426 if (unlink->seqnum == 0xffff)
32427 usbip_uinfo("seqnum max\n");
32428
32429 @@ -965,9 +965,11 @@ static void vhci_device_init(struct vhci
32430
32431 init_waitqueue_head(&vdev->waitq_tx);
32432
32433 - vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
32434 - vdev->ud.eh_ops.reset = vhci_device_reset;
32435 - vdev->ud.eh_ops.unusable = vhci_device_unusable;
32436 + pax_open_kernel();
32437 + *(void **)&vdev->ud.eh_ops.shutdown = vhci_shutdown_connection;
32438 + *(void **)&vdev->ud.eh_ops.reset = vhci_device_reset;
32439 + *(void **)&vdev->ud.eh_ops.unusable = vhci_device_unusable;
32440 + pax_close_kernel();
32441
32442 usbip_start_eh(&vdev->ud);
32443 }
32444 @@ -992,7 +994,7 @@ static int vhci_start(struct usb_hcd *hc
32445 vdev->rhport = rhport;
32446 }
32447
32448 - atomic_set(&vhci->seqnum, 0);
32449 + atomic_set_unchecked(&vhci->seqnum, 0);
32450 spin_lock_init(&vhci->lock);
32451
32452
32453 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c
32454 --- linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-05-19 00:06:34.000000000 -0400
32455 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-08-05 19:44:37.000000000 -0400
32456 @@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct
32457 usbip_uerr("cannot find a urb of seqnum %u\n",
32458 pdu->base.seqnum);
32459 usbip_uinfo("max seqnum %d\n",
32460 - atomic_read(&the_controller->seqnum));
32461 + atomic_read_unchecked(&the_controller->seqnum));
32462 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32463 return;
32464 }
32465 diff -urNp linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c
32466 --- linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-05-19 00:06:34.000000000 -0400
32467 +++ linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-13 20:36:25.000000000 -0400
32468 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32469
32470 struct usbctlx_completor {
32471 int (*complete) (struct usbctlx_completor *);
32472 -};
32473 +} __no_const;
32474
32475 static int
32476 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32477 diff -urNp linux-2.6.39.4/drivers/target/target_core_alua.c linux-2.6.39.4/drivers/target/target_core_alua.c
32478 --- linux-2.6.39.4/drivers/target/target_core_alua.c 2011-05-19 00:06:34.000000000 -0400
32479 +++ linux-2.6.39.4/drivers/target/target_core_alua.c 2011-08-05 19:44:37.000000000 -0400
32480 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32481 char path[ALUA_METADATA_PATH_LEN];
32482 int len;
32483
32484 + pax_track_stack();
32485 +
32486 memset(path, 0, ALUA_METADATA_PATH_LEN);
32487
32488 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32489 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32490 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32491 int len;
32492
32493 + pax_track_stack();
32494 +
32495 memset(path, 0, ALUA_METADATA_PATH_LEN);
32496 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32497
32498 diff -urNp linux-2.6.39.4/drivers/target/target_core_cdb.c linux-2.6.39.4/drivers/target/target_core_cdb.c
32499 --- linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-05-19 00:06:34.000000000 -0400
32500 +++ linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-08-05 19:44:37.000000000 -0400
32501 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32502 int length = 0;
32503 unsigned char buf[SE_MODE_PAGE_BUF];
32504
32505 + pax_track_stack();
32506 +
32507 memset(buf, 0, SE_MODE_PAGE_BUF);
32508
32509 switch (cdb[2] & 0x3f) {
32510 diff -urNp linux-2.6.39.4/drivers/target/target_core_configfs.c linux-2.6.39.4/drivers/target/target_core_configfs.c
32511 --- linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-05-19 00:06:34.000000000 -0400
32512 +++ linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-08-05 20:34:06.000000000 -0400
32513 @@ -1280,6 +1280,8 @@ static ssize_t target_core_dev_pr_show_a
32514 ssize_t len = 0;
32515 int reg_count = 0, prf_isid;
32516
32517 + pax_track_stack();
32518 +
32519 if (!(su_dev->se_dev_ptr))
32520 return -ENODEV;
32521
32522 diff -urNp linux-2.6.39.4/drivers/target/target_core_pr.c linux-2.6.39.4/drivers/target/target_core_pr.c
32523 --- linux-2.6.39.4/drivers/target/target_core_pr.c 2011-05-19 00:06:34.000000000 -0400
32524 +++ linux-2.6.39.4/drivers/target/target_core_pr.c 2011-08-05 19:44:37.000000000 -0400
32525 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32526 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32527 u16 tpgt;
32528
32529 + pax_track_stack();
32530 +
32531 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32532 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32533 /*
32534 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32535 ssize_t len = 0;
32536 int reg_count = 0;
32537
32538 + pax_track_stack();
32539 +
32540 memset(buf, 0, pr_aptpl_buf_len);
32541 /*
32542 * Called to clear metadata once APTPL has been deactivated.
32543 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32544 char path[512];
32545 int ret;
32546
32547 + pax_track_stack();
32548 +
32549 memset(iov, 0, sizeof(struct iovec));
32550 memset(path, 0, 512);
32551
32552 diff -urNp linux-2.6.39.4/drivers/target/target_core_tmr.c linux-2.6.39.4/drivers/target/target_core_tmr.c
32553 --- linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-06-03 00:04:14.000000000 -0400
32554 +++ linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-08-05 19:44:37.000000000 -0400
32555 @@ -263,7 +263,7 @@ int core_tmr_lun_reset(
32556 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32557 T_TASK(cmd)->t_task_cdbs,
32558 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32559 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32560 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32561 atomic_read(&T_TASK(cmd)->t_transport_active),
32562 atomic_read(&T_TASK(cmd)->t_transport_stop),
32563 atomic_read(&T_TASK(cmd)->t_transport_sent));
32564 @@ -305,7 +305,7 @@ int core_tmr_lun_reset(
32565 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32566 " task: %p, t_fe_count: %d dev: %p\n", task,
32567 fe_count, dev);
32568 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32569 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32570 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32571 flags);
32572 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32573 @@ -315,7 +315,7 @@ int core_tmr_lun_reset(
32574 }
32575 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32576 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32577 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32578 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32579 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32580 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32581
32582 diff -urNp linux-2.6.39.4/drivers/target/target_core_transport.c linux-2.6.39.4/drivers/target/target_core_transport.c
32583 --- linux-2.6.39.4/drivers/target/target_core_transport.c 2011-06-03 00:04:14.000000000 -0400
32584 +++ linux-2.6.39.4/drivers/target/target_core_transport.c 2011-08-05 19:44:37.000000000 -0400
32585 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32586
32587 dev->queue_depth = dev_limits->queue_depth;
32588 atomic_set(&dev->depth_left, dev->queue_depth);
32589 - atomic_set(&dev->dev_ordered_id, 0);
32590 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32591
32592 se_dev_set_default_attribs(dev, dev_limits);
32593
32594 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32595 * Used to determine when ORDERED commands should go from
32596 * Dormant to Active status.
32597 */
32598 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32599 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32600 smp_mb__after_atomic_inc();
32601 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32602 cmd->se_ordered_id, cmd->sam_task_attr,
32603 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32604 " t_transport_active: %d t_transport_stop: %d"
32605 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32606 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32607 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32608 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32609 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32610 atomic_read(&T_TASK(cmd)->t_transport_active),
32611 atomic_read(&T_TASK(cmd)->t_transport_stop),
32612 @@ -2673,9 +2673,9 @@ check_depth:
32613 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32614 atomic_set(&task->task_active, 1);
32615 atomic_set(&task->task_sent, 1);
32616 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32617 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32618
32619 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32620 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32621 T_TASK(cmd)->t_task_cdbs)
32622 atomic_set(&cmd->transport_sent, 1);
32623
32624 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32625 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32626 }
32627 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32628 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32629 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32630 goto remove;
32631
32632 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32633 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32634 {
32635 int ret = 0;
32636
32637 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32638 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32639 if (!(send_status) ||
32640 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32641 return 1;
32642 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32643 */
32644 if (cmd->data_direction == DMA_TO_DEVICE) {
32645 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32646 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32647 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32648 smp_mb__after_atomic_inc();
32649 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32650 transport_new_cmd_failure(cmd);
32651 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32652 CMD_TFO(cmd)->get_task_tag(cmd),
32653 T_TASK(cmd)->t_task_cdbs,
32654 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32655 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32656 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32657 atomic_read(&T_TASK(cmd)->t_transport_active),
32658 atomic_read(&T_TASK(cmd)->t_transport_stop),
32659 atomic_read(&T_TASK(cmd)->t_transport_sent));
32660 diff -urNp linux-2.6.39.4/drivers/telephony/ixj.c linux-2.6.39.4/drivers/telephony/ixj.c
32661 --- linux-2.6.39.4/drivers/telephony/ixj.c 2011-05-19 00:06:34.000000000 -0400
32662 +++ linux-2.6.39.4/drivers/telephony/ixj.c 2011-08-05 19:44:37.000000000 -0400
32663 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32664 bool mContinue;
32665 char *pIn, *pOut;
32666
32667 + pax_track_stack();
32668 +
32669 if (!SCI_Prepare(j))
32670 return 0;
32671
32672 diff -urNp linux-2.6.39.4/drivers/tty/hvc/hvcs.c linux-2.6.39.4/drivers/tty/hvc/hvcs.c
32673 --- linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-05-19 00:06:34.000000000 -0400
32674 +++ linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-08-05 19:44:37.000000000 -0400
32675 @@ -83,6 +83,7 @@
32676 #include <asm/hvcserver.h>
32677 #include <asm/uaccess.h>
32678 #include <asm/vio.h>
32679 +#include <asm/local.h>
32680
32681 /*
32682 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32683 @@ -270,7 +271,7 @@ struct hvcs_struct {
32684 unsigned int index;
32685
32686 struct tty_struct *tty;
32687 - int open_count;
32688 + local_t open_count;
32689
32690 /*
32691 * Used to tell the driver kernel_thread what operations need to take
32692 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32693
32694 spin_lock_irqsave(&hvcsd->lock, flags);
32695
32696 - if (hvcsd->open_count > 0) {
32697 + if (local_read(&hvcsd->open_count) > 0) {
32698 spin_unlock_irqrestore(&hvcsd->lock, flags);
32699 printk(KERN_INFO "HVCS: vterm state unchanged. "
32700 "The hvcs device node is still in use.\n");
32701 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32702 if ((retval = hvcs_partner_connect(hvcsd)))
32703 goto error_release;
32704
32705 - hvcsd->open_count = 1;
32706 + local_set(&hvcsd->open_count, 1);
32707 hvcsd->tty = tty;
32708 tty->driver_data = hvcsd;
32709
32710 @@ -1179,7 +1180,7 @@ fast_open:
32711
32712 spin_lock_irqsave(&hvcsd->lock, flags);
32713 kref_get(&hvcsd->kref);
32714 - hvcsd->open_count++;
32715 + local_inc(&hvcsd->open_count);
32716 hvcsd->todo_mask |= HVCS_SCHED_READ;
32717 spin_unlock_irqrestore(&hvcsd->lock, flags);
32718
32719 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32720 hvcsd = tty->driver_data;
32721
32722 spin_lock_irqsave(&hvcsd->lock, flags);
32723 - if (--hvcsd->open_count == 0) {
32724 + if (local_dec_and_test(&hvcsd->open_count)) {
32725
32726 vio_disable_interrupts(hvcsd->vdev);
32727
32728 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32729 free_irq(irq, hvcsd);
32730 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32731 return;
32732 - } else if (hvcsd->open_count < 0) {
32733 + } else if (local_read(&hvcsd->open_count) < 0) {
32734 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32735 " is missmanaged.\n",
32736 - hvcsd->vdev->unit_address, hvcsd->open_count);
32737 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32738 }
32739
32740 spin_unlock_irqrestore(&hvcsd->lock, flags);
32741 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32742
32743 spin_lock_irqsave(&hvcsd->lock, flags);
32744 /* Preserve this so that we know how many kref refs to put */
32745 - temp_open_count = hvcsd->open_count;
32746 + temp_open_count = local_read(&hvcsd->open_count);
32747
32748 /*
32749 * Don't kref put inside the spinlock because the destruction
32750 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32751 hvcsd->tty->driver_data = NULL;
32752 hvcsd->tty = NULL;
32753
32754 - hvcsd->open_count = 0;
32755 + local_set(&hvcsd->open_count, 0);
32756
32757 /* This will drop any buffered data on the floor which is OK in a hangup
32758 * scenario. */
32759 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32760 * the middle of a write operation? This is a crummy place to do this
32761 * but we want to keep it all in the spinlock.
32762 */
32763 - if (hvcsd->open_count <= 0) {
32764 + if (local_read(&hvcsd->open_count) <= 0) {
32765 spin_unlock_irqrestore(&hvcsd->lock, flags);
32766 return -ENODEV;
32767 }
32768 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32769 {
32770 struct hvcs_struct *hvcsd = tty->driver_data;
32771
32772 - if (!hvcsd || hvcsd->open_count <= 0)
32773 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32774 return 0;
32775
32776 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32777 diff -urNp linux-2.6.39.4/drivers/tty/ipwireless/tty.c linux-2.6.39.4/drivers/tty/ipwireless/tty.c
32778 --- linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-05-19 00:06:34.000000000 -0400
32779 +++ linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-08-05 19:44:37.000000000 -0400
32780 @@ -29,6 +29,7 @@
32781 #include <linux/tty_driver.h>
32782 #include <linux/tty_flip.h>
32783 #include <linux/uaccess.h>
32784 +#include <asm/local.h>
32785
32786 #include "tty.h"
32787 #include "network.h"
32788 @@ -51,7 +52,7 @@ struct ipw_tty {
32789 int tty_type;
32790 struct ipw_network *network;
32791 struct tty_struct *linux_tty;
32792 - int open_count;
32793 + local_t open_count;
32794 unsigned int control_lines;
32795 struct mutex ipw_tty_mutex;
32796 int tx_bytes_queued;
32797 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32798 mutex_unlock(&tty->ipw_tty_mutex);
32799 return -ENODEV;
32800 }
32801 - if (tty->open_count == 0)
32802 + if (local_read(&tty->open_count) == 0)
32803 tty->tx_bytes_queued = 0;
32804
32805 - tty->open_count++;
32806 + local_inc(&tty->open_count);
32807
32808 tty->linux_tty = linux_tty;
32809 linux_tty->driver_data = tty;
32810 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32811
32812 static void do_ipw_close(struct ipw_tty *tty)
32813 {
32814 - tty->open_count--;
32815 -
32816 - if (tty->open_count == 0) {
32817 + if (local_dec_return(&tty->open_count) == 0) {
32818 struct tty_struct *linux_tty = tty->linux_tty;
32819
32820 if (linux_tty != NULL) {
32821 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32822 return;
32823
32824 mutex_lock(&tty->ipw_tty_mutex);
32825 - if (tty->open_count == 0) {
32826 + if (local_read(&tty->open_count) == 0) {
32827 mutex_unlock(&tty->ipw_tty_mutex);
32828 return;
32829 }
32830 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32831 return;
32832 }
32833
32834 - if (!tty->open_count) {
32835 + if (!local_read(&tty->open_count)) {
32836 mutex_unlock(&tty->ipw_tty_mutex);
32837 return;
32838 }
32839 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32840 return -ENODEV;
32841
32842 mutex_lock(&tty->ipw_tty_mutex);
32843 - if (!tty->open_count) {
32844 + if (!local_read(&tty->open_count)) {
32845 mutex_unlock(&tty->ipw_tty_mutex);
32846 return -EINVAL;
32847 }
32848 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32849 if (!tty)
32850 return -ENODEV;
32851
32852 - if (!tty->open_count)
32853 + if (!local_read(&tty->open_count))
32854 return -EINVAL;
32855
32856 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32857 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32858 if (!tty)
32859 return 0;
32860
32861 - if (!tty->open_count)
32862 + if (!local_read(&tty->open_count))
32863 return 0;
32864
32865 return tty->tx_bytes_queued;
32866 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32867 if (!tty)
32868 return -ENODEV;
32869
32870 - if (!tty->open_count)
32871 + if (!local_read(&tty->open_count))
32872 return -EINVAL;
32873
32874 return get_control_lines(tty);
32875 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32876 if (!tty)
32877 return -ENODEV;
32878
32879 - if (!tty->open_count)
32880 + if (!local_read(&tty->open_count))
32881 return -EINVAL;
32882
32883 return set_control_lines(tty, set, clear);
32884 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32885 if (!tty)
32886 return -ENODEV;
32887
32888 - if (!tty->open_count)
32889 + if (!local_read(&tty->open_count))
32890 return -EINVAL;
32891
32892 /* FIXME: Exactly how is the tty object locked here .. */
32893 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32894 against a parallel ioctl etc */
32895 mutex_lock(&ttyj->ipw_tty_mutex);
32896 }
32897 - while (ttyj->open_count)
32898 + while (local_read(&ttyj->open_count))
32899 do_ipw_close(ttyj);
32900 ipwireless_disassociate_network_ttys(network,
32901 ttyj->channel_idx);
32902 diff -urNp linux-2.6.39.4/drivers/tty/n_gsm.c linux-2.6.39.4/drivers/tty/n_gsm.c
32903 --- linux-2.6.39.4/drivers/tty/n_gsm.c 2011-05-19 00:06:34.000000000 -0400
32904 +++ linux-2.6.39.4/drivers/tty/n_gsm.c 2011-08-05 19:44:37.000000000 -0400
32905 @@ -1588,7 +1588,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32906 return NULL;
32907 spin_lock_init(&dlci->lock);
32908 dlci->fifo = &dlci->_fifo;
32909 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32910 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32911 kfree(dlci);
32912 return NULL;
32913 }
32914 diff -urNp linux-2.6.39.4/drivers/tty/n_tty.c linux-2.6.39.4/drivers/tty/n_tty.c
32915 --- linux-2.6.39.4/drivers/tty/n_tty.c 2011-05-19 00:06:34.000000000 -0400
32916 +++ linux-2.6.39.4/drivers/tty/n_tty.c 2011-08-05 19:44:37.000000000 -0400
32917 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32918 {
32919 *ops = tty_ldisc_N_TTY;
32920 ops->owner = NULL;
32921 - ops->refcount = ops->flags = 0;
32922 + atomic_set(&ops->refcount, 0);
32923 + ops->flags = 0;
32924 }
32925 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32926 diff -urNp linux-2.6.39.4/drivers/tty/pty.c linux-2.6.39.4/drivers/tty/pty.c
32927 --- linux-2.6.39.4/drivers/tty/pty.c 2011-05-19 00:06:34.000000000 -0400
32928 +++ linux-2.6.39.4/drivers/tty/pty.c 2011-08-05 20:34:06.000000000 -0400
32929 @@ -753,8 +753,10 @@ static void __init unix98_pty_init(void)
32930 register_sysctl_table(pty_root_table);
32931
32932 /* Now create the /dev/ptmx special device */
32933 + pax_open_kernel();
32934 tty_default_fops(&ptmx_fops);
32935 - ptmx_fops.open = ptmx_open;
32936 + *(void **)&ptmx_fops.open = ptmx_open;
32937 + pax_close_kernel();
32938
32939 cdev_init(&ptmx_cdev, &ptmx_fops);
32940 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32941 diff -urNp linux-2.6.39.4/drivers/tty/rocket.c linux-2.6.39.4/drivers/tty/rocket.c
32942 --- linux-2.6.39.4/drivers/tty/rocket.c 2011-05-19 00:06:34.000000000 -0400
32943 +++ linux-2.6.39.4/drivers/tty/rocket.c 2011-08-05 19:44:37.000000000 -0400
32944 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
32945 struct rocket_ports tmp;
32946 int board;
32947
32948 + pax_track_stack();
32949 +
32950 if (!retports)
32951 return -EFAULT;
32952 memset(&tmp, 0, sizeof (tmp));
32953 diff -urNp linux-2.6.39.4/drivers/tty/serial/kgdboc.c linux-2.6.39.4/drivers/tty/serial/kgdboc.c
32954 --- linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-05-19 00:06:34.000000000 -0400
32955 +++ linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-08-05 20:34:06.000000000 -0400
32956 @@ -23,8 +23,9 @@
32957 #define MAX_CONFIG_LEN 40
32958
32959 static struct kgdb_io kgdboc_io_ops;
32960 +static struct kgdb_io kgdboc_io_ops_console;
32961
32962 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
32963 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
32964 static int configured = -1;
32965
32966 static char config[MAX_CONFIG_LEN];
32967 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
32968 kgdboc_unregister_kbd();
32969 if (configured == 1)
32970 kgdb_unregister_io_module(&kgdboc_io_ops);
32971 + else if (configured == 2)
32972 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
32973 }
32974
32975 static int configure_kgdboc(void)
32976 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
32977 int err;
32978 char *cptr = config;
32979 struct console *cons;
32980 + int is_console = 0;
32981
32982 err = kgdboc_option_setup(config);
32983 if (err || !strlen(config) || isspace(config[0]))
32984 goto noconfig;
32985
32986 err = -ENODEV;
32987 - kgdboc_io_ops.is_console = 0;
32988 kgdb_tty_driver = NULL;
32989
32990 kgdboc_use_kms = 0;
32991 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
32992 int idx;
32993 if (cons->device && cons->device(cons, &idx) == p &&
32994 idx == tty_line) {
32995 - kgdboc_io_ops.is_console = 1;
32996 + is_console = 1;
32997 break;
32998 }
32999 cons = cons->next;
33000 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
33001 kgdb_tty_line = tty_line;
33002
33003 do_register:
33004 - err = kgdb_register_io_module(&kgdboc_io_ops);
33005 + if (is_console) {
33006 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
33007 + configured = 2;
33008 + } else {
33009 + err = kgdb_register_io_module(&kgdboc_io_ops);
33010 + configured = 1;
33011 + }
33012 if (err)
33013 goto noconfig;
33014
33015 - configured = 1;
33016 -
33017 return 0;
33018
33019 noconfig:
33020 @@ -212,7 +219,7 @@ noconfig:
33021 static int __init init_kgdboc(void)
33022 {
33023 /* Already configured? */
33024 - if (configured == 1)
33025 + if (configured >= 1)
33026 return 0;
33027
33028 return configure_kgdboc();
33029 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
33030 if (config[len - 1] == '\n')
33031 config[len - 1] = '\0';
33032
33033 - if (configured == 1)
33034 + if (configured >= 1)
33035 cleanup_kgdboc();
33036
33037 /* Go and configure with the new params. */
33038 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33039 .post_exception = kgdboc_post_exp_handler,
33040 };
33041
33042 +static struct kgdb_io kgdboc_io_ops_console = {
33043 + .name = "kgdboc",
33044 + .read_char = kgdboc_get_char,
33045 + .write_char = kgdboc_put_char,
33046 + .pre_exception = kgdboc_pre_exp_handler,
33047 + .post_exception = kgdboc_post_exp_handler,
33048 + .is_console = 1
33049 +};
33050 +
33051 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33052 /* This is only available if kgdboc is a built in for early debugging */
33053 static int __init kgdboc_early_init(char *opt)
33054 diff -urNp linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c
33055 --- linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-05-19 00:06:34.000000000 -0400
33056 +++ linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-08-05 20:34:06.000000000 -0400
33057 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33058 int loop = 1, num, total = 0;
33059 u8 recv_buf[512], *pbuf;
33060
33061 + pax_track_stack();
33062 +
33063 pbuf = recv_buf;
33064 do {
33065 num = max3110_read_multi(max, pbuf);
33066 diff -urNp linux-2.6.39.4/drivers/tty/tty_io.c linux-2.6.39.4/drivers/tty/tty_io.c
33067 --- linux-2.6.39.4/drivers/tty/tty_io.c 2011-05-19 00:06:34.000000000 -0400
33068 +++ linux-2.6.39.4/drivers/tty/tty_io.c 2011-08-05 20:34:06.000000000 -0400
33069 @@ -3200,7 +3200,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33070
33071 void tty_default_fops(struct file_operations *fops)
33072 {
33073 - *fops = tty_fops;
33074 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33075 }
33076
33077 /*
33078 diff -urNp linux-2.6.39.4/drivers/tty/tty_ldisc.c linux-2.6.39.4/drivers/tty/tty_ldisc.c
33079 --- linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-07-09 09:18:51.000000000 -0400
33080 +++ linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-08-05 19:44:37.000000000 -0400
33081 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33082 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33083 struct tty_ldisc_ops *ldo = ld->ops;
33084
33085 - ldo->refcount--;
33086 + atomic_dec(&ldo->refcount);
33087 module_put(ldo->owner);
33088 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33089
33090 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33091 spin_lock_irqsave(&tty_ldisc_lock, flags);
33092 tty_ldiscs[disc] = new_ldisc;
33093 new_ldisc->num = disc;
33094 - new_ldisc->refcount = 0;
33095 + atomic_set(&new_ldisc->refcount, 0);
33096 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33097
33098 return ret;
33099 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33100 return -EINVAL;
33101
33102 spin_lock_irqsave(&tty_ldisc_lock, flags);
33103 - if (tty_ldiscs[disc]->refcount)
33104 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33105 ret = -EBUSY;
33106 else
33107 tty_ldiscs[disc] = NULL;
33108 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33109 if (ldops) {
33110 ret = ERR_PTR(-EAGAIN);
33111 if (try_module_get(ldops->owner)) {
33112 - ldops->refcount++;
33113 + atomic_inc(&ldops->refcount);
33114 ret = ldops;
33115 }
33116 }
33117 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33118 unsigned long flags;
33119
33120 spin_lock_irqsave(&tty_ldisc_lock, flags);
33121 - ldops->refcount--;
33122 + atomic_dec(&ldops->refcount);
33123 module_put(ldops->owner);
33124 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33125 }
33126 diff -urNp linux-2.6.39.4/drivers/tty/vt/keyboard.c linux-2.6.39.4/drivers/tty/vt/keyboard.c
33127 --- linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-05-19 00:06:34.000000000 -0400
33128 +++ linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-08-05 19:44:37.000000000 -0400
33129 @@ -658,6 +658,16 @@ static void k_spec(struct vc_data *vc, u
33130 kbd->kbdmode == VC_OFF) &&
33131 value != KVAL(K_SAK))
33132 return; /* SAK is allowed even in raw mode */
33133 +
33134 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33135 + {
33136 + void *func = fn_handler[value];
33137 + if (func == fn_show_state || func == fn_show_ptregs ||
33138 + func == fn_show_mem)
33139 + return;
33140 + }
33141 +#endif
33142 +
33143 fn_handler[value](vc);
33144 }
33145
33146 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt.c linux-2.6.39.4/drivers/tty/vt/vt.c
33147 --- linux-2.6.39.4/drivers/tty/vt/vt.c 2011-05-19 00:06:34.000000000 -0400
33148 +++ linux-2.6.39.4/drivers/tty/vt/vt.c 2011-08-05 19:44:37.000000000 -0400
33149 @@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33150
33151 static void notify_write(struct vc_data *vc, unsigned int unicode)
33152 {
33153 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33154 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33155 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33156 }
33157
33158 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c
33159 --- linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-05-19 00:06:34.000000000 -0400
33160 +++ linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-08-05 19:44:37.000000000 -0400
33161 @@ -209,9 +209,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33162 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33163 return -EFAULT;
33164
33165 - if (!capable(CAP_SYS_TTY_CONFIG))
33166 - perm = 0;
33167 -
33168 switch (cmd) {
33169 case KDGKBENT:
33170 key_map = key_maps[s];
33171 @@ -223,6 +220,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33172 val = (i ? K_HOLE : K_NOSUCHMAP);
33173 return put_user(val, &user_kbe->kb_value);
33174 case KDSKBENT:
33175 + if (!capable(CAP_SYS_TTY_CONFIG))
33176 + perm = 0;
33177 +
33178 if (!perm)
33179 return -EPERM;
33180 if (!i && v == K_NOSUCHMAP) {
33181 @@ -324,9 +324,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33182 int i, j, k;
33183 int ret;
33184
33185 - if (!capable(CAP_SYS_TTY_CONFIG))
33186 - perm = 0;
33187 -
33188 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33189 if (!kbs) {
33190 ret = -ENOMEM;
33191 @@ -360,6 +357,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33192 kfree(kbs);
33193 return ((p && *p) ? -EOVERFLOW : 0);
33194 case KDSKBSENT:
33195 + if (!capable(CAP_SYS_TTY_CONFIG))
33196 + perm = 0;
33197 +
33198 if (!perm) {
33199 ret = -EPERM;
33200 goto reterr;
33201 diff -urNp linux-2.6.39.4/drivers/uio/uio.c linux-2.6.39.4/drivers/uio/uio.c
33202 --- linux-2.6.39.4/drivers/uio/uio.c 2011-05-19 00:06:34.000000000 -0400
33203 +++ linux-2.6.39.4/drivers/uio/uio.c 2011-08-05 19:44:37.000000000 -0400
33204 @@ -25,6 +25,7 @@
33205 #include <linux/kobject.h>
33206 #include <linux/cdev.h>
33207 #include <linux/uio_driver.h>
33208 +#include <asm/local.h>
33209
33210 #define UIO_MAX_DEVICES (1U << MINORBITS)
33211
33212 @@ -32,10 +33,10 @@ struct uio_device {
33213 struct module *owner;
33214 struct device *dev;
33215 int minor;
33216 - atomic_t event;
33217 + atomic_unchecked_t event;
33218 struct fasync_struct *async_queue;
33219 wait_queue_head_t wait;
33220 - int vma_count;
33221 + local_t vma_count;
33222 struct uio_info *info;
33223 struct kobject *map_dir;
33224 struct kobject *portio_dir;
33225 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33226 struct device_attribute *attr, char *buf)
33227 {
33228 struct uio_device *idev = dev_get_drvdata(dev);
33229 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33230 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33231 }
33232
33233 static struct device_attribute uio_class_attributes[] = {
33234 @@ -402,7 +403,7 @@ void uio_event_notify(struct uio_info *i
33235 {
33236 struct uio_device *idev = info->uio_dev;
33237
33238 - atomic_inc(&idev->event);
33239 + atomic_inc_unchecked(&idev->event);
33240 wake_up_interruptible(&idev->wait);
33241 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33242 }
33243 @@ -455,7 +456,7 @@ static int uio_open(struct inode *inode,
33244 }
33245
33246 listener->dev = idev;
33247 - listener->event_count = atomic_read(&idev->event);
33248 + listener->event_count = atomic_read_unchecked(&idev->event);
33249 filep->private_data = listener;
33250
33251 if (idev->info->open) {
33252 @@ -506,7 +507,7 @@ static unsigned int uio_poll(struct file
33253 return -EIO;
33254
33255 poll_wait(filep, &idev->wait, wait);
33256 - if (listener->event_count != atomic_read(&idev->event))
33257 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33258 return POLLIN | POLLRDNORM;
33259 return 0;
33260 }
33261 @@ -531,7 +532,7 @@ static ssize_t uio_read(struct file *fil
33262 do {
33263 set_current_state(TASK_INTERRUPTIBLE);
33264
33265 - event_count = atomic_read(&idev->event);
33266 + event_count = atomic_read_unchecked(&idev->event);
33267 if (event_count != listener->event_count) {
33268 if (copy_to_user(buf, &event_count, count))
33269 retval = -EFAULT;
33270 @@ -602,13 +603,13 @@ static int uio_find_mem_index(struct vm_
33271 static void uio_vma_open(struct vm_area_struct *vma)
33272 {
33273 struct uio_device *idev = vma->vm_private_data;
33274 - idev->vma_count++;
33275 + local_inc(&idev->vma_count);
33276 }
33277
33278 static void uio_vma_close(struct vm_area_struct *vma)
33279 {
33280 struct uio_device *idev = vma->vm_private_data;
33281 - idev->vma_count--;
33282 + local_dec(&idev->vma_count);
33283 }
33284
33285 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33286 @@ -819,7 +820,7 @@ int __uio_register_device(struct module
33287 idev->owner = owner;
33288 idev->info = info;
33289 init_waitqueue_head(&idev->wait);
33290 - atomic_set(&idev->event, 0);
33291 + atomic_set_unchecked(&idev->event, 0);
33292
33293 ret = uio_get_minor(idev);
33294 if (ret)
33295 diff -urNp linux-2.6.39.4/drivers/usb/atm/cxacru.c linux-2.6.39.4/drivers/usb/atm/cxacru.c
33296 --- linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-05-19 00:06:34.000000000 -0400
33297 +++ linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-08-05 19:44:37.000000000 -0400
33298 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33299 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33300 if (ret < 2)
33301 return -EINVAL;
33302 - if (index < 0 || index > 0x7f)
33303 + if (index > 0x7f)
33304 return -EINVAL;
33305 pos += tmp;
33306
33307 diff -urNp linux-2.6.39.4/drivers/usb/atm/usbatm.c linux-2.6.39.4/drivers/usb/atm/usbatm.c
33308 --- linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-05-19 00:06:34.000000000 -0400
33309 +++ linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-08-05 19:44:37.000000000 -0400
33310 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33311 if (printk_ratelimit())
33312 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33313 __func__, vpi, vci);
33314 - atomic_inc(&vcc->stats->rx_err);
33315 + atomic_inc_unchecked(&vcc->stats->rx_err);
33316 return;
33317 }
33318
33319 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33320 if (length > ATM_MAX_AAL5_PDU) {
33321 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33322 __func__, length, vcc);
33323 - atomic_inc(&vcc->stats->rx_err);
33324 + atomic_inc_unchecked(&vcc->stats->rx_err);
33325 goto out;
33326 }
33327
33328 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33329 if (sarb->len < pdu_length) {
33330 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33331 __func__, pdu_length, sarb->len, vcc);
33332 - atomic_inc(&vcc->stats->rx_err);
33333 + atomic_inc_unchecked(&vcc->stats->rx_err);
33334 goto out;
33335 }
33336
33337 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33338 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33339 __func__, vcc);
33340 - atomic_inc(&vcc->stats->rx_err);
33341 + atomic_inc_unchecked(&vcc->stats->rx_err);
33342 goto out;
33343 }
33344
33345 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33346 if (printk_ratelimit())
33347 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33348 __func__, length);
33349 - atomic_inc(&vcc->stats->rx_drop);
33350 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33351 goto out;
33352 }
33353
33354 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33355
33356 vcc->push(vcc, skb);
33357
33358 - atomic_inc(&vcc->stats->rx);
33359 + atomic_inc_unchecked(&vcc->stats->rx);
33360 out:
33361 skb_trim(sarb, 0);
33362 }
33363 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33364 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33365
33366 usbatm_pop(vcc, skb);
33367 - atomic_inc(&vcc->stats->tx);
33368 + atomic_inc_unchecked(&vcc->stats->tx);
33369
33370 skb = skb_dequeue(&instance->sndqueue);
33371 }
33372 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33373 if (!left--)
33374 return sprintf(page,
33375 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33376 - atomic_read(&atm_dev->stats.aal5.tx),
33377 - atomic_read(&atm_dev->stats.aal5.tx_err),
33378 - atomic_read(&atm_dev->stats.aal5.rx),
33379 - atomic_read(&atm_dev->stats.aal5.rx_err),
33380 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33381 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33382 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33383 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33384 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33385 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33386
33387 if (!left--) {
33388 if (instance->disconnected)
33389 diff -urNp linux-2.6.39.4/drivers/usb/core/devices.c linux-2.6.39.4/drivers/usb/core/devices.c
33390 --- linux-2.6.39.4/drivers/usb/core/devices.c 2011-05-19 00:06:34.000000000 -0400
33391 +++ linux-2.6.39.4/drivers/usb/core/devices.c 2011-08-05 19:44:37.000000000 -0400
33392 @@ -126,7 +126,7 @@ static const char *format_endpt =
33393 * time it gets called.
33394 */
33395 static struct device_connect_event {
33396 - atomic_t count;
33397 + atomic_unchecked_t count;
33398 wait_queue_head_t wait;
33399 } device_event = {
33400 .count = ATOMIC_INIT(1),
33401 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33402
33403 void usbfs_conn_disc_event(void)
33404 {
33405 - atomic_add(2, &device_event.count);
33406 + atomic_add_unchecked(2, &device_event.count);
33407 wake_up(&device_event.wait);
33408 }
33409
33410 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33411
33412 poll_wait(file, &device_event.wait, wait);
33413
33414 - event_count = atomic_read(&device_event.count);
33415 + event_count = atomic_read_unchecked(&device_event.count);
33416 if (file->f_version != event_count) {
33417 file->f_version = event_count;
33418 return POLLIN | POLLRDNORM;
33419 diff -urNp linux-2.6.39.4/drivers/usb/core/message.c linux-2.6.39.4/drivers/usb/core/message.c
33420 --- linux-2.6.39.4/drivers/usb/core/message.c 2011-07-09 09:18:51.000000000 -0400
33421 +++ linux-2.6.39.4/drivers/usb/core/message.c 2011-08-05 19:44:37.000000000 -0400
33422 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33423 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33424 if (buf) {
33425 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33426 - if (len > 0) {
33427 - smallbuf = kmalloc(++len, GFP_NOIO);
33428 + if (len++ > 0) {
33429 + smallbuf = kmalloc(len, GFP_NOIO);
33430 if (!smallbuf)
33431 return buf;
33432 memcpy(smallbuf, buf, len);
33433 diff -urNp linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c
33434 --- linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-05-19 00:06:34.000000000 -0400
33435 +++ linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-08-05 20:34:06.000000000 -0400
33436 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33437
33438 #ifdef CONFIG_KGDB
33439 static struct kgdb_io kgdbdbgp_io_ops;
33440 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33441 +static struct kgdb_io kgdbdbgp_io_ops_console;
33442 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33443 #else
33444 #define dbgp_kgdb_mode (0)
33445 #endif
33446 @@ -1032,6 +1033,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33447 .write_char = kgdbdbgp_write_char,
33448 };
33449
33450 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33451 + .name = "kgdbdbgp",
33452 + .read_char = kgdbdbgp_read_char,
33453 + .write_char = kgdbdbgp_write_char,
33454 + .is_console = 1
33455 +};
33456 +
33457 static int kgdbdbgp_wait_time;
33458
33459 static int __init kgdbdbgp_parse_config(char *str)
33460 @@ -1047,8 +1055,10 @@ static int __init kgdbdbgp_parse_config(
33461 ptr++;
33462 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33463 }
33464 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33465 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33466 + if (early_dbgp_console.index != -1)
33467 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33468 + else
33469 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33470
33471 return 0;
33472 }
33473 diff -urNp linux-2.6.39.4/drivers/usb/host/xhci-mem.c linux-2.6.39.4/drivers/usb/host/xhci-mem.c
33474 --- linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-06-25 12:55:23.000000000 -0400
33475 +++ linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-08-05 19:44:37.000000000 -0400
33476 @@ -1680,6 +1680,8 @@ static int xhci_check_trb_in_td_math(str
33477 unsigned int num_tests;
33478 int i, ret;
33479
33480 + pax_track_stack();
33481 +
33482 num_tests = ARRAY_SIZE(simple_test_vector);
33483 for (i = 0; i < num_tests; i++) {
33484 ret = xhci_test_trb_in_td(xhci,
33485 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h
33486 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-05-19 00:06:34.000000000 -0400
33487 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-08-05 19:44:37.000000000 -0400
33488 @@ -192,7 +192,7 @@ struct wahc {
33489 struct list_head xfer_delayed_list;
33490 spinlock_t xfer_list_lock;
33491 struct work_struct xfer_work;
33492 - atomic_t xfer_id_count;
33493 + atomic_unchecked_t xfer_id_count;
33494 };
33495
33496
33497 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33498 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33499 spin_lock_init(&wa->xfer_list_lock);
33500 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33501 - atomic_set(&wa->xfer_id_count, 1);
33502 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33503 }
33504
33505 /**
33506 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c
33507 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-05-19 00:06:34.000000000 -0400
33508 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-05 19:44:37.000000000 -0400
33509 @@ -294,7 +294,7 @@ out:
33510 */
33511 static void wa_xfer_id_init(struct wa_xfer *xfer)
33512 {
33513 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33514 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33515 }
33516
33517 /*
33518 diff -urNp linux-2.6.39.4/drivers/vhost/vhost.c linux-2.6.39.4/drivers/vhost/vhost.c
33519 --- linux-2.6.39.4/drivers/vhost/vhost.c 2011-05-19 00:06:34.000000000 -0400
33520 +++ linux-2.6.39.4/drivers/vhost/vhost.c 2011-08-05 19:44:37.000000000 -0400
33521 @@ -580,7 +580,7 @@ static int init_used(struct vhost_virtqu
33522 return get_user(vq->last_used_idx, &used->idx);
33523 }
33524
33525 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33526 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33527 {
33528 struct file *eventfp, *filep = NULL,
33529 *pollstart = NULL, *pollstop = NULL;
33530 diff -urNp linux-2.6.39.4/drivers/video/fbcmap.c linux-2.6.39.4/drivers/video/fbcmap.c
33531 --- linux-2.6.39.4/drivers/video/fbcmap.c 2011-05-19 00:06:34.000000000 -0400
33532 +++ linux-2.6.39.4/drivers/video/fbcmap.c 2011-08-05 19:44:37.000000000 -0400
33533 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33534 rc = -ENODEV;
33535 goto out;
33536 }
33537 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33538 - !info->fbops->fb_setcmap)) {
33539 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33540 rc = -EINVAL;
33541 goto out1;
33542 }
33543 diff -urNp linux-2.6.39.4/drivers/video/fbmem.c linux-2.6.39.4/drivers/video/fbmem.c
33544 --- linux-2.6.39.4/drivers/video/fbmem.c 2011-05-19 00:06:34.000000000 -0400
33545 +++ linux-2.6.39.4/drivers/video/fbmem.c 2011-08-05 19:44:37.000000000 -0400
33546 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33547 image->dx += image->width + 8;
33548 }
33549 } else if (rotate == FB_ROTATE_UD) {
33550 - for (x = 0; x < num && image->dx >= 0; x++) {
33551 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33552 info->fbops->fb_imageblit(info, image);
33553 image->dx -= image->width + 8;
33554 }
33555 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33556 image->dy += image->height + 8;
33557 }
33558 } else if (rotate == FB_ROTATE_CCW) {
33559 - for (x = 0; x < num && image->dy >= 0; x++) {
33560 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33561 info->fbops->fb_imageblit(info, image);
33562 image->dy -= image->height + 8;
33563 }
33564 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33565 int flags = info->flags;
33566 int ret = 0;
33567
33568 + pax_track_stack();
33569 +
33570 if (var->activate & FB_ACTIVATE_INV_MODE) {
33571 struct fb_videomode mode1, mode2;
33572
33573 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33574 void __user *argp = (void __user *)arg;
33575 long ret = 0;
33576
33577 + pax_track_stack();
33578 +
33579 switch (cmd) {
33580 case FBIOGET_VSCREENINFO:
33581 if (!lock_fb_info(info))
33582 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33583 return -EFAULT;
33584 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33585 return -EINVAL;
33586 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33587 + if (con2fb.framebuffer >= FB_MAX)
33588 return -EINVAL;
33589 if (!registered_fb[con2fb.framebuffer])
33590 request_module("fb%d", con2fb.framebuffer);
33591 diff -urNp linux-2.6.39.4/drivers/video/i810/i810_accel.c linux-2.6.39.4/drivers/video/i810/i810_accel.c
33592 --- linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-05-19 00:06:34.000000000 -0400
33593 +++ linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-08-05 19:44:37.000000000 -0400
33594 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33595 }
33596 }
33597 printk("ringbuffer lockup!!!\n");
33598 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33599 i810_report_error(mmio);
33600 par->dev_flags |= LOCKUP;
33601 info->pixmap.scan_align = 1;
33602 diff -urNp linux-2.6.39.4/drivers/video/udlfb.c linux-2.6.39.4/drivers/video/udlfb.c
33603 --- linux-2.6.39.4/drivers/video/udlfb.c 2011-05-19 00:06:34.000000000 -0400
33604 +++ linux-2.6.39.4/drivers/video/udlfb.c 2011-08-05 19:44:37.000000000 -0400
33605 @@ -584,11 +584,11 @@ int dlfb_handle_damage(struct dlfb_data
33606 dlfb_urb_completion(urb);
33607
33608 error:
33609 - atomic_add(bytes_sent, &dev->bytes_sent);
33610 - atomic_add(bytes_identical, &dev->bytes_identical);
33611 - atomic_add(width*height*2, &dev->bytes_rendered);
33612 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33613 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33614 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33615 end_cycles = get_cycles();
33616 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33617 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33618 >> 10)), /* Kcycles */
33619 &dev->cpu_kcycles_used);
33620
33621 @@ -709,11 +709,11 @@ static void dlfb_dpy_deferred_io(struct
33622 dlfb_urb_completion(urb);
33623
33624 error:
33625 - atomic_add(bytes_sent, &dev->bytes_sent);
33626 - atomic_add(bytes_identical, &dev->bytes_identical);
33627 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33628 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33629 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33630 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33631 end_cycles = get_cycles();
33632 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33633 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33634 >> 10)), /* Kcycles */
33635 &dev->cpu_kcycles_used);
33636 }
33637 @@ -1301,7 +1301,7 @@ static ssize_t metrics_bytes_rendered_sh
33638 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33639 struct dlfb_data *dev = fb_info->par;
33640 return snprintf(buf, PAGE_SIZE, "%u\n",
33641 - atomic_read(&dev->bytes_rendered));
33642 + atomic_read_unchecked(&dev->bytes_rendered));
33643 }
33644
33645 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33646 @@ -1309,7 +1309,7 @@ static ssize_t metrics_bytes_identical_s
33647 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33648 struct dlfb_data *dev = fb_info->par;
33649 return snprintf(buf, PAGE_SIZE, "%u\n",
33650 - atomic_read(&dev->bytes_identical));
33651 + atomic_read_unchecked(&dev->bytes_identical));
33652 }
33653
33654 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33655 @@ -1317,7 +1317,7 @@ static ssize_t metrics_bytes_sent_show(s
33656 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33657 struct dlfb_data *dev = fb_info->par;
33658 return snprintf(buf, PAGE_SIZE, "%u\n",
33659 - atomic_read(&dev->bytes_sent));
33660 + atomic_read_unchecked(&dev->bytes_sent));
33661 }
33662
33663 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33664 @@ -1325,7 +1325,7 @@ static ssize_t metrics_cpu_kcycles_used_
33665 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33666 struct dlfb_data *dev = fb_info->par;
33667 return snprintf(buf, PAGE_SIZE, "%u\n",
33668 - atomic_read(&dev->cpu_kcycles_used));
33669 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33670 }
33671
33672 static ssize_t edid_show(
33673 @@ -1382,10 +1382,10 @@ static ssize_t metrics_reset_store(struc
33674 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33675 struct dlfb_data *dev = fb_info->par;
33676
33677 - atomic_set(&dev->bytes_rendered, 0);
33678 - atomic_set(&dev->bytes_identical, 0);
33679 - atomic_set(&dev->bytes_sent, 0);
33680 - atomic_set(&dev->cpu_kcycles_used, 0);
33681 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33682 + atomic_set_unchecked(&dev->bytes_identical, 0);
33683 + atomic_set_unchecked(&dev->bytes_sent, 0);
33684 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33685
33686 return count;
33687 }
33688 diff -urNp linux-2.6.39.4/drivers/video/uvesafb.c linux-2.6.39.4/drivers/video/uvesafb.c
33689 --- linux-2.6.39.4/drivers/video/uvesafb.c 2011-05-19 00:06:34.000000000 -0400
33690 +++ linux-2.6.39.4/drivers/video/uvesafb.c 2011-08-05 20:34:06.000000000 -0400
33691 @@ -19,6 +19,7 @@
33692 #include <linux/io.h>
33693 #include <linux/mutex.h>
33694 #include <linux/slab.h>
33695 +#include <linux/moduleloader.h>
33696 #include <video/edid.h>
33697 #include <video/uvesafb.h>
33698 #ifdef CONFIG_X86
33699 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33700 NULL,
33701 };
33702
33703 - return call_usermodehelper(v86d_path, argv, envp, 1);
33704 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33705 }
33706
33707 /*
33708 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33709 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33710 par->pmi_setpal = par->ypan = 0;
33711 } else {
33712 +
33713 +#ifdef CONFIG_PAX_KERNEXEC
33714 +#ifdef CONFIG_MODULES
33715 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33716 +#endif
33717 + if (!par->pmi_code) {
33718 + par->pmi_setpal = par->ypan = 0;
33719 + return 0;
33720 + }
33721 +#endif
33722 +
33723 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33724 + task->t.regs.edi);
33725 +
33726 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33727 + pax_open_kernel();
33728 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33729 + pax_close_kernel();
33730 +
33731 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33732 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33733 +#else
33734 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33735 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33736 +#endif
33737 +
33738 printk(KERN_INFO "uvesafb: protected mode interface info at "
33739 "%04x:%04x\n",
33740 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33741 @@ -1821,6 +1844,11 @@ out:
33742 if (par->vbe_modes)
33743 kfree(par->vbe_modes);
33744
33745 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33746 + if (par->pmi_code)
33747 + module_free_exec(NULL, par->pmi_code);
33748 +#endif
33749 +
33750 framebuffer_release(info);
33751 return err;
33752 }
33753 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33754 kfree(par->vbe_state_orig);
33755 if (par->vbe_state_saved)
33756 kfree(par->vbe_state_saved);
33757 +
33758 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33759 + if (par->pmi_code)
33760 + module_free_exec(NULL, par->pmi_code);
33761 +#endif
33762 +
33763 }
33764
33765 framebuffer_release(info);
33766 diff -urNp linux-2.6.39.4/drivers/video/vesafb.c linux-2.6.39.4/drivers/video/vesafb.c
33767 --- linux-2.6.39.4/drivers/video/vesafb.c 2011-05-19 00:06:34.000000000 -0400
33768 +++ linux-2.6.39.4/drivers/video/vesafb.c 2011-08-05 20:34:06.000000000 -0400
33769 @@ -9,6 +9,7 @@
33770 */
33771
33772 #include <linux/module.h>
33773 +#include <linux/moduleloader.h>
33774 #include <linux/kernel.h>
33775 #include <linux/errno.h>
33776 #include <linux/string.h>
33777 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33778 static int vram_total __initdata; /* Set total amount of memory */
33779 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33780 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33781 -static void (*pmi_start)(void) __read_mostly;
33782 -static void (*pmi_pal) (void) __read_mostly;
33783 +static void (*pmi_start)(void) __read_only;
33784 +static void (*pmi_pal) (void) __read_only;
33785 static int depth __read_mostly;
33786 static int vga_compat __read_mostly;
33787 /* --------------------------------------------------------------------- */
33788 @@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
33789 unsigned int size_vmode;
33790 unsigned int size_remap;
33791 unsigned int size_total;
33792 + void *pmi_code = NULL;
33793
33794 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33795 return -ENODEV;
33796 @@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
33797 size_remap = size_total;
33798 vesafb_fix.smem_len = size_remap;
33799
33800 -#ifndef __i386__
33801 - screen_info.vesapm_seg = 0;
33802 -#endif
33803 -
33804 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33805 printk(KERN_WARNING
33806 "vesafb: cannot reserve video memory at 0x%lx\n",
33807 @@ -306,9 +304,21 @@ static int __init vesafb_probe(struct pl
33808 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33809 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33810
33811 +#ifdef __i386__
33812 +
33813 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33814 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33815 + if (!pmi_code)
33816 +#elif !defined(CONFIG_PAX_KERNEXEC)
33817 + if (0)
33818 +#endif
33819 +
33820 +#endif
33821 + screen_info.vesapm_seg = 0;
33822 +
33823 if (screen_info.vesapm_seg) {
33824 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33825 - screen_info.vesapm_seg,screen_info.vesapm_off);
33826 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33827 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33828 }
33829
33830 if (screen_info.vesapm_seg < 0xc000)
33831 @@ -316,9 +326,25 @@ static int __init vesafb_probe(struct pl
33832
33833 if (ypan || pmi_setpal) {
33834 unsigned short *pmi_base;
33835 +
33836 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33837 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33838 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33839 +
33840 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33841 + pax_open_kernel();
33842 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33843 +#else
33844 + pmi_code = pmi_base;
33845 +#endif
33846 +
33847 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33848 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33849 +
33850 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33851 + pmi_start = ktva_ktla(pmi_start);
33852 + pmi_pal = ktva_ktla(pmi_pal);
33853 + pax_close_kernel();
33854 +#endif
33855 +
33856 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33857 if (pmi_base[3]) {
33858 printk(KERN_INFO "vesafb: pmi: ports = ");
33859 @@ -487,6 +513,11 @@ static int __init vesafb_probe(struct pl
33860 info->node, info->fix.id);
33861 return 0;
33862 err:
33863 +
33864 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33865 + module_free_exec(NULL, pmi_code);
33866 +#endif
33867 +
33868 if (info->screen_base)
33869 iounmap(info->screen_base);
33870 framebuffer_release(info);
33871 diff -urNp linux-2.6.39.4/drivers/virtio/virtio_balloon.c linux-2.6.39.4/drivers/virtio/virtio_balloon.c
33872 --- linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-05-19 00:06:34.000000000 -0400
33873 +++ linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-08-05 19:44:37.000000000 -0400
33874 @@ -176,6 +176,8 @@ static void update_balloon_stats(struct
33875 struct sysinfo i;
33876 int idx = 0;
33877
33878 + pax_track_stack();
33879 +
33880 all_vm_events(events);
33881 si_meminfo(&i);
33882
33883 diff -urNp linux-2.6.39.4/fs/9p/vfs_inode.c linux-2.6.39.4/fs/9p/vfs_inode.c
33884 --- linux-2.6.39.4/fs/9p/vfs_inode.c 2011-05-19 00:06:34.000000000 -0400
33885 +++ linux-2.6.39.4/fs/9p/vfs_inode.c 2011-08-05 19:44:37.000000000 -0400
33886 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33887 void
33888 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33889 {
33890 - char *s = nd_get_link(nd);
33891 + const char *s = nd_get_link(nd);
33892
33893 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33894 IS_ERR(s) ? "<error>" : s);
33895 diff -urNp linux-2.6.39.4/fs/aio.c linux-2.6.39.4/fs/aio.c
33896 --- linux-2.6.39.4/fs/aio.c 2011-05-19 00:06:34.000000000 -0400
33897 +++ linux-2.6.39.4/fs/aio.c 2011-08-05 19:44:37.000000000 -0400
33898 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33899 size += sizeof(struct io_event) * nr_events;
33900 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33901
33902 - if (nr_pages < 0)
33903 + if (nr_pages <= 0)
33904 return -EINVAL;
33905
33906 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33907 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33908 struct aio_timeout to;
33909 int retry = 0;
33910
33911 + pax_track_stack();
33912 +
33913 /* needed to zero any padding within an entry (there shouldn't be
33914 * any, but C is fun!
33915 */
33916 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33917 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33918 {
33919 ssize_t ret;
33920 + struct iovec iovstack;
33921
33922 #ifdef CONFIG_COMPAT
33923 if (compat)
33924 ret = compat_rw_copy_check_uvector(type,
33925 (struct compat_iovec __user *)kiocb->ki_buf,
33926 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33927 + kiocb->ki_nbytes, 1, &iovstack,
33928 &kiocb->ki_iovec);
33929 else
33930 #endif
33931 ret = rw_copy_check_uvector(type,
33932 (struct iovec __user *)kiocb->ki_buf,
33933 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33934 + kiocb->ki_nbytes, 1, &iovstack,
33935 &kiocb->ki_iovec);
33936 if (ret < 0)
33937 goto out;
33938
33939 + if (kiocb->ki_iovec == &iovstack) {
33940 + kiocb->ki_inline_vec = iovstack;
33941 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
33942 + }
33943 kiocb->ki_nr_segs = kiocb->ki_nbytes;
33944 kiocb->ki_cur_seg = 0;
33945 /* ki_nbytes/left now reflect bytes instead of segs */
33946 diff -urNp linux-2.6.39.4/fs/attr.c linux-2.6.39.4/fs/attr.c
33947 --- linux-2.6.39.4/fs/attr.c 2011-05-19 00:06:34.000000000 -0400
33948 +++ linux-2.6.39.4/fs/attr.c 2011-08-05 19:44:37.000000000 -0400
33949 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
33950 unsigned long limit;
33951
33952 limit = rlimit(RLIMIT_FSIZE);
33953 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
33954 if (limit != RLIM_INFINITY && offset > limit)
33955 goto out_sig;
33956 if (offset > inode->i_sb->s_maxbytes)
33957 diff -urNp linux-2.6.39.4/fs/befs/linuxvfs.c linux-2.6.39.4/fs/befs/linuxvfs.c
33958 --- linux-2.6.39.4/fs/befs/linuxvfs.c 2011-05-19 00:06:34.000000000 -0400
33959 +++ linux-2.6.39.4/fs/befs/linuxvfs.c 2011-08-05 19:44:37.000000000 -0400
33960 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
33961 {
33962 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
33963 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
33964 - char *link = nd_get_link(nd);
33965 + const char *link = nd_get_link(nd);
33966 if (!IS_ERR(link))
33967 kfree(link);
33968 }
33969 diff -urNp linux-2.6.39.4/fs/binfmt_aout.c linux-2.6.39.4/fs/binfmt_aout.c
33970 --- linux-2.6.39.4/fs/binfmt_aout.c 2011-05-19 00:06:34.000000000 -0400
33971 +++ linux-2.6.39.4/fs/binfmt_aout.c 2011-08-05 19:44:37.000000000 -0400
33972 @@ -16,6 +16,7 @@
33973 #include <linux/string.h>
33974 #include <linux/fs.h>
33975 #include <linux/file.h>
33976 +#include <linux/security.h>
33977 #include <linux/stat.h>
33978 #include <linux/fcntl.h>
33979 #include <linux/ptrace.h>
33980 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
33981 #endif
33982 # define START_STACK(u) ((void __user *)u.start_stack)
33983
33984 + memset(&dump, 0, sizeof(dump));
33985 +
33986 fs = get_fs();
33987 set_fs(KERNEL_DS);
33988 has_dumped = 1;
33989 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
33990
33991 /* If the size of the dump file exceeds the rlimit, then see what would happen
33992 if we wrote the stack, but not the data area. */
33993 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
33994 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
33995 dump.u_dsize = 0;
33996
33997 /* Make sure we have enough room to write the stack and data areas. */
33998 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
33999 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
34000 dump.u_ssize = 0;
34001
34002 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
34003 rlim = rlimit(RLIMIT_DATA);
34004 if (rlim >= RLIM_INFINITY)
34005 rlim = ~0;
34006 +
34007 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
34008 if (ex.a_data + ex.a_bss > rlim)
34009 return -ENOMEM;
34010
34011 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
34012 install_exec_creds(bprm);
34013 current->flags &= ~PF_FORKNOEXEC;
34014
34015 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34016 + current->mm->pax_flags = 0UL;
34017 +#endif
34018 +
34019 +#ifdef CONFIG_PAX_PAGEEXEC
34020 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
34021 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
34022 +
34023 +#ifdef CONFIG_PAX_EMUTRAMP
34024 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
34025 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
34026 +#endif
34027 +
34028 +#ifdef CONFIG_PAX_MPROTECT
34029 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
34030 + current->mm->pax_flags |= MF_PAX_MPROTECT;
34031 +#endif
34032 +
34033 + }
34034 +#endif
34035 +
34036 if (N_MAGIC(ex) == OMAGIC) {
34037 unsigned long text_addr, map_size;
34038 loff_t pos;
34039 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
34040
34041 down_write(&current->mm->mmap_sem);
34042 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34043 - PROT_READ | PROT_WRITE | PROT_EXEC,
34044 + PROT_READ | PROT_WRITE,
34045 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34046 fd_offset + ex.a_text);
34047 up_write(&current->mm->mmap_sem);
34048 diff -urNp linux-2.6.39.4/fs/binfmt_elf.c linux-2.6.39.4/fs/binfmt_elf.c
34049 --- linux-2.6.39.4/fs/binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
34050 +++ linux-2.6.39.4/fs/binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
34051 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34052 #define elf_core_dump NULL
34053 #endif
34054
34055 +#ifdef CONFIG_PAX_MPROTECT
34056 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34057 +#endif
34058 +
34059 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34060 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34061 #else
34062 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34063 .load_binary = load_elf_binary,
34064 .load_shlib = load_elf_library,
34065 .core_dump = elf_core_dump,
34066 +
34067 +#ifdef CONFIG_PAX_MPROTECT
34068 + .handle_mprotect= elf_handle_mprotect,
34069 +#endif
34070 +
34071 .min_coredump = ELF_EXEC_PAGESIZE,
34072 };
34073
34074 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34075
34076 static int set_brk(unsigned long start, unsigned long end)
34077 {
34078 + unsigned long e = end;
34079 +
34080 start = ELF_PAGEALIGN(start);
34081 end = ELF_PAGEALIGN(end);
34082 if (end > start) {
34083 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34084 if (BAD_ADDR(addr))
34085 return addr;
34086 }
34087 - current->mm->start_brk = current->mm->brk = end;
34088 + current->mm->start_brk = current->mm->brk = e;
34089 return 0;
34090 }
34091
34092 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34093 elf_addr_t __user *u_rand_bytes;
34094 const char *k_platform = ELF_PLATFORM;
34095 const char *k_base_platform = ELF_BASE_PLATFORM;
34096 - unsigned char k_rand_bytes[16];
34097 + u32 k_rand_bytes[4];
34098 int items;
34099 elf_addr_t *elf_info;
34100 int ei_index = 0;
34101 const struct cred *cred = current_cred();
34102 struct vm_area_struct *vma;
34103 + unsigned long saved_auxv[AT_VECTOR_SIZE];
34104 +
34105 + pax_track_stack();
34106
34107 /*
34108 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34109 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34110 * Generate 16 random bytes for userspace PRNG seeding.
34111 */
34112 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34113 - u_rand_bytes = (elf_addr_t __user *)
34114 - STACK_ALLOC(p, sizeof(k_rand_bytes));
34115 + srandom32(k_rand_bytes[0] ^ random32());
34116 + srandom32(k_rand_bytes[1] ^ random32());
34117 + srandom32(k_rand_bytes[2] ^ random32());
34118 + srandom32(k_rand_bytes[3] ^ random32());
34119 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
34120 + u_rand_bytes = (elf_addr_t __user *) p;
34121 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34122 return -EFAULT;
34123
34124 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34125 return -EFAULT;
34126 current->mm->env_end = p;
34127
34128 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34129 +
34130 /* Put the elf_info on the stack in the right place. */
34131 sp = (elf_addr_t __user *)envp + 1;
34132 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34133 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34134 return -EFAULT;
34135 return 0;
34136 }
34137 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34138 {
34139 struct elf_phdr *elf_phdata;
34140 struct elf_phdr *eppnt;
34141 - unsigned long load_addr = 0;
34142 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34143 int load_addr_set = 0;
34144 unsigned long last_bss = 0, elf_bss = 0;
34145 - unsigned long error = ~0UL;
34146 + unsigned long error = -EINVAL;
34147 unsigned long total_size;
34148 int retval, i, size;
34149
34150 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34151 goto out_close;
34152 }
34153
34154 +#ifdef CONFIG_PAX_SEGMEXEC
34155 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34156 + pax_task_size = SEGMEXEC_TASK_SIZE;
34157 +#endif
34158 +
34159 eppnt = elf_phdata;
34160 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34161 if (eppnt->p_type == PT_LOAD) {
34162 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34163 k = load_addr + eppnt->p_vaddr;
34164 if (BAD_ADDR(k) ||
34165 eppnt->p_filesz > eppnt->p_memsz ||
34166 - eppnt->p_memsz > TASK_SIZE ||
34167 - TASK_SIZE - eppnt->p_memsz < k) {
34168 + eppnt->p_memsz > pax_task_size ||
34169 + pax_task_size - eppnt->p_memsz < k) {
34170 error = -ENOMEM;
34171 goto out_close;
34172 }
34173 @@ -528,6 +553,193 @@ out:
34174 return error;
34175 }
34176
34177 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34178 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34179 +{
34180 + unsigned long pax_flags = 0UL;
34181 +
34182 +#ifdef CONFIG_PAX_PAGEEXEC
34183 + if (elf_phdata->p_flags & PF_PAGEEXEC)
34184 + pax_flags |= MF_PAX_PAGEEXEC;
34185 +#endif
34186 +
34187 +#ifdef CONFIG_PAX_SEGMEXEC
34188 + if (elf_phdata->p_flags & PF_SEGMEXEC)
34189 + pax_flags |= MF_PAX_SEGMEXEC;
34190 +#endif
34191 +
34192 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34193 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34194 + if ((__supported_pte_mask & _PAGE_NX))
34195 + pax_flags &= ~MF_PAX_SEGMEXEC;
34196 + else
34197 + pax_flags &= ~MF_PAX_PAGEEXEC;
34198 + }
34199 +#endif
34200 +
34201 +#ifdef CONFIG_PAX_EMUTRAMP
34202 + if (elf_phdata->p_flags & PF_EMUTRAMP)
34203 + pax_flags |= MF_PAX_EMUTRAMP;
34204 +#endif
34205 +
34206 +#ifdef CONFIG_PAX_MPROTECT
34207 + if (elf_phdata->p_flags & PF_MPROTECT)
34208 + pax_flags |= MF_PAX_MPROTECT;
34209 +#endif
34210 +
34211 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34212 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34213 + pax_flags |= MF_PAX_RANDMMAP;
34214 +#endif
34215 +
34216 + return pax_flags;
34217 +}
34218 +#endif
34219 +
34220 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34221 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34222 +{
34223 + unsigned long pax_flags = 0UL;
34224 +
34225 +#ifdef CONFIG_PAX_PAGEEXEC
34226 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34227 + pax_flags |= MF_PAX_PAGEEXEC;
34228 +#endif
34229 +
34230 +#ifdef CONFIG_PAX_SEGMEXEC
34231 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34232 + pax_flags |= MF_PAX_SEGMEXEC;
34233 +#endif
34234 +
34235 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34236 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34237 + if ((__supported_pte_mask & _PAGE_NX))
34238 + pax_flags &= ~MF_PAX_SEGMEXEC;
34239 + else
34240 + pax_flags &= ~MF_PAX_PAGEEXEC;
34241 + }
34242 +#endif
34243 +
34244 +#ifdef CONFIG_PAX_EMUTRAMP
34245 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34246 + pax_flags |= MF_PAX_EMUTRAMP;
34247 +#endif
34248 +
34249 +#ifdef CONFIG_PAX_MPROTECT
34250 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34251 + pax_flags |= MF_PAX_MPROTECT;
34252 +#endif
34253 +
34254 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34255 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34256 + pax_flags |= MF_PAX_RANDMMAP;
34257 +#endif
34258 +
34259 + return pax_flags;
34260 +}
34261 +#endif
34262 +
34263 +#ifdef CONFIG_PAX_EI_PAX
34264 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34265 +{
34266 + unsigned long pax_flags = 0UL;
34267 +
34268 +#ifdef CONFIG_PAX_PAGEEXEC
34269 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34270 + pax_flags |= MF_PAX_PAGEEXEC;
34271 +#endif
34272 +
34273 +#ifdef CONFIG_PAX_SEGMEXEC
34274 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34275 + pax_flags |= MF_PAX_SEGMEXEC;
34276 +#endif
34277 +
34278 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34279 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34280 + if ((__supported_pte_mask & _PAGE_NX))
34281 + pax_flags &= ~MF_PAX_SEGMEXEC;
34282 + else
34283 + pax_flags &= ~MF_PAX_PAGEEXEC;
34284 + }
34285 +#endif
34286 +
34287 +#ifdef CONFIG_PAX_EMUTRAMP
34288 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34289 + pax_flags |= MF_PAX_EMUTRAMP;
34290 +#endif
34291 +
34292 +#ifdef CONFIG_PAX_MPROTECT
34293 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34294 + pax_flags |= MF_PAX_MPROTECT;
34295 +#endif
34296 +
34297 +#ifdef CONFIG_PAX_ASLR
34298 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34299 + pax_flags |= MF_PAX_RANDMMAP;
34300 +#endif
34301 +
34302 + return pax_flags;
34303 +}
34304 +#endif
34305 +
34306 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34307 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34308 +{
34309 + unsigned long pax_flags = 0UL;
34310 +
34311 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34312 + unsigned long i;
34313 + int found_flags = 0;
34314 +#endif
34315 +
34316 +#ifdef CONFIG_PAX_EI_PAX
34317 + pax_flags = pax_parse_ei_pax(elf_ex);
34318 +#endif
34319 +
34320 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34321 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34322 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34323 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34324 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34325 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34326 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34327 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34328 + return -EINVAL;
34329 +
34330 +#ifdef CONFIG_PAX_SOFTMODE
34331 + if (pax_softmode)
34332 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34333 + else
34334 +#endif
34335 +
34336 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34337 + found_flags = 1;
34338 + break;
34339 + }
34340 +#endif
34341 +
34342 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34343 + if (found_flags == 0) {
34344 + struct elf_phdr phdr;
34345 + memset(&phdr, 0, sizeof(phdr));
34346 + phdr.p_flags = PF_NOEMUTRAMP;
34347 +#ifdef CONFIG_PAX_SOFTMODE
34348 + if (pax_softmode)
34349 + pax_flags = pax_parse_softmode(&phdr);
34350 + else
34351 +#endif
34352 + pax_flags = pax_parse_hardmode(&phdr);
34353 + }
34354 +#endif
34355 +
34356 + if (0 > pax_check_flags(&pax_flags))
34357 + return -EINVAL;
34358 +
34359 + current->mm->pax_flags = pax_flags;
34360 + return 0;
34361 +}
34362 +#endif
34363 +
34364 /*
34365 * These are the functions used to load ELF style executables and shared
34366 * libraries. There is no binary dependent code anywhere else.
34367 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34368 {
34369 unsigned int random_variable = 0;
34370
34371 +#ifdef CONFIG_PAX_RANDUSTACK
34372 + if (randomize_va_space)
34373 + return stack_top - current->mm->delta_stack;
34374 +#endif
34375 +
34376 if ((current->flags & PF_RANDOMIZE) &&
34377 !(current->personality & ADDR_NO_RANDOMIZE)) {
34378 random_variable = get_random_int() & STACK_RND_MASK;
34379 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34380 unsigned long load_addr = 0, load_bias = 0;
34381 int load_addr_set = 0;
34382 char * elf_interpreter = NULL;
34383 - unsigned long error;
34384 + unsigned long error = 0;
34385 struct elf_phdr *elf_ppnt, *elf_phdata;
34386 unsigned long elf_bss, elf_brk;
34387 int retval, i;
34388 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34389 unsigned long start_code, end_code, start_data, end_data;
34390 unsigned long reloc_func_desc __maybe_unused = 0;
34391 int executable_stack = EXSTACK_DEFAULT;
34392 - unsigned long def_flags = 0;
34393 struct {
34394 struct elfhdr elf_ex;
34395 struct elfhdr interp_elf_ex;
34396 } *loc;
34397 + unsigned long pax_task_size = TASK_SIZE;
34398
34399 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34400 if (!loc) {
34401 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34402
34403 /* OK, This is the point of no return */
34404 current->flags &= ~PF_FORKNOEXEC;
34405 - current->mm->def_flags = def_flags;
34406 +
34407 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34408 + current->mm->pax_flags = 0UL;
34409 +#endif
34410 +
34411 +#ifdef CONFIG_PAX_DLRESOLVE
34412 + current->mm->call_dl_resolve = 0UL;
34413 +#endif
34414 +
34415 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34416 + current->mm->call_syscall = 0UL;
34417 +#endif
34418 +
34419 +#ifdef CONFIG_PAX_ASLR
34420 + current->mm->delta_mmap = 0UL;
34421 + current->mm->delta_stack = 0UL;
34422 +#endif
34423 +
34424 + current->mm->def_flags = 0;
34425 +
34426 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34427 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34428 + send_sig(SIGKILL, current, 0);
34429 + goto out_free_dentry;
34430 + }
34431 +#endif
34432 +
34433 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34434 + pax_set_initial_flags(bprm);
34435 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34436 + if (pax_set_initial_flags_func)
34437 + (pax_set_initial_flags_func)(bprm);
34438 +#endif
34439 +
34440 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34441 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34442 + current->mm->context.user_cs_limit = PAGE_SIZE;
34443 + current->mm->def_flags |= VM_PAGEEXEC;
34444 + }
34445 +#endif
34446 +
34447 +#ifdef CONFIG_PAX_SEGMEXEC
34448 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34449 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34450 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34451 + pax_task_size = SEGMEXEC_TASK_SIZE;
34452 + current->mm->def_flags |= VM_NOHUGEPAGE;
34453 + }
34454 +#endif
34455 +
34456 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34457 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34458 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34459 + put_cpu();
34460 + }
34461 +#endif
34462
34463 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34464 may depend on the personality. */
34465 SET_PERSONALITY(loc->elf_ex);
34466 +
34467 +#ifdef CONFIG_PAX_ASLR
34468 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34469 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34470 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34471 + }
34472 +#endif
34473 +
34474 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34475 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34476 + executable_stack = EXSTACK_DISABLE_X;
34477 + current->personality &= ~READ_IMPLIES_EXEC;
34478 + } else
34479 +#endif
34480 +
34481 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34482 current->personality |= READ_IMPLIES_EXEC;
34483
34484 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34485 #else
34486 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34487 #endif
34488 +
34489 +#ifdef CONFIG_PAX_RANDMMAP
34490 + /* PaX: randomize base address at the default exe base if requested */
34491 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34492 +#ifdef CONFIG_SPARC64
34493 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34494 +#else
34495 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34496 +#endif
34497 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34498 + elf_flags |= MAP_FIXED;
34499 + }
34500 +#endif
34501 +
34502 }
34503
34504 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34505 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34506 * allowed task size. Note that p_filesz must always be
34507 * <= p_memsz so it is only necessary to check p_memsz.
34508 */
34509 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34510 - elf_ppnt->p_memsz > TASK_SIZE ||
34511 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34512 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34513 + elf_ppnt->p_memsz > pax_task_size ||
34514 + pax_task_size - elf_ppnt->p_memsz < k) {
34515 /* set_brk can never work. Avoid overflows. */
34516 send_sig(SIGKILL, current, 0);
34517 retval = -EINVAL;
34518 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34519 start_data += load_bias;
34520 end_data += load_bias;
34521
34522 +#ifdef CONFIG_PAX_RANDMMAP
34523 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34524 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34525 +#endif
34526 +
34527 /* Calling set_brk effectively mmaps the pages that we need
34528 * for the bss and break sections. We must do this before
34529 * mapping in the interpreter, to make sure it doesn't wind
34530 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34531 goto out_free_dentry;
34532 }
34533 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34534 - send_sig(SIGSEGV, current, 0);
34535 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34536 - goto out_free_dentry;
34537 + /*
34538 + * This bss-zeroing can fail if the ELF
34539 + * file specifies odd protections. So
34540 + * we don't check the return value
34541 + */
34542 }
34543
34544 if (elf_interpreter) {
34545 @@ -1090,7 +1398,7 @@ out:
34546 * Decide what to dump of a segment, part, all or none.
34547 */
34548 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34549 - unsigned long mm_flags)
34550 + unsigned long mm_flags, long signr)
34551 {
34552 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34553
34554 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34555 if (vma->vm_file == NULL)
34556 return 0;
34557
34558 - if (FILTER(MAPPED_PRIVATE))
34559 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34560 goto whole;
34561
34562 /*
34563 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34564 {
34565 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34566 int i = 0;
34567 - do
34568 + do {
34569 i += 2;
34570 - while (auxv[i - 2] != AT_NULL);
34571 + } while (auxv[i - 2] != AT_NULL);
34572 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34573 }
34574
34575 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34576 }
34577
34578 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34579 - unsigned long mm_flags)
34580 + struct coredump_params *cprm)
34581 {
34582 struct vm_area_struct *vma;
34583 size_t size = 0;
34584
34585 for (vma = first_vma(current, gate_vma); vma != NULL;
34586 vma = next_vma(vma, gate_vma))
34587 - size += vma_dump_size(vma, mm_flags);
34588 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34589 return size;
34590 }
34591
34592 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34593
34594 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34595
34596 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34597 + offset += elf_core_vma_data_size(gate_vma, cprm);
34598 offset += elf_core_extra_data_size();
34599 e_shoff = offset;
34600
34601 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34602 offset = dataoff;
34603
34604 size += sizeof(*elf);
34605 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34606 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34607 goto end_coredump;
34608
34609 size += sizeof(*phdr4note);
34610 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34611 if (size > cprm->limit
34612 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34613 goto end_coredump;
34614 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34615 phdr.p_offset = offset;
34616 phdr.p_vaddr = vma->vm_start;
34617 phdr.p_paddr = 0;
34618 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34619 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34620 phdr.p_memsz = vma->vm_end - vma->vm_start;
34621 offset += phdr.p_filesz;
34622 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34623 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34624 phdr.p_align = ELF_EXEC_PAGESIZE;
34625
34626 size += sizeof(phdr);
34627 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34628 if (size > cprm->limit
34629 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34630 goto end_coredump;
34631 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34632 unsigned long addr;
34633 unsigned long end;
34634
34635 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34636 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34637
34638 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34639 struct page *page;
34640 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34641 page = get_dump_page(addr);
34642 if (page) {
34643 void *kaddr = kmap(page);
34644 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34645 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34646 !dump_write(cprm->file, kaddr,
34647 PAGE_SIZE);
34648 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34649
34650 if (e_phnum == PN_XNUM) {
34651 size += sizeof(*shdr4extnum);
34652 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34653 if (size > cprm->limit
34654 || !dump_write(cprm->file, shdr4extnum,
34655 sizeof(*shdr4extnum)))
34656 @@ -2067,6 +2380,97 @@ out:
34657
34658 #endif /* CONFIG_ELF_CORE */
34659
34660 +#ifdef CONFIG_PAX_MPROTECT
34661 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34662 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34663 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34664 + *
34665 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34666 + * basis because we want to allow the common case and not the special ones.
34667 + */
34668 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34669 +{
34670 + struct elfhdr elf_h;
34671 + struct elf_phdr elf_p;
34672 + unsigned long i;
34673 + unsigned long oldflags;
34674 + bool is_textrel_rw, is_textrel_rx, is_relro;
34675 +
34676 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34677 + return;
34678 +
34679 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34680 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34681 +
34682 +#ifdef CONFIG_PAX_ELFRELOCS
34683 + /* possible TEXTREL */
34684 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34685 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34686 +#else
34687 + is_textrel_rw = false;
34688 + is_textrel_rx = false;
34689 +#endif
34690 +
34691 + /* possible RELRO */
34692 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34693 +
34694 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34695 + return;
34696 +
34697 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34698 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34699 +
34700 +#ifdef CONFIG_PAX_ETEXECRELOCS
34701 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34702 +#else
34703 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34704 +#endif
34705 +
34706 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34707 + !elf_check_arch(&elf_h) ||
34708 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34709 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34710 + return;
34711 +
34712 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34713 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34714 + return;
34715 + switch (elf_p.p_type) {
34716 + case PT_DYNAMIC:
34717 + if (!is_textrel_rw && !is_textrel_rx)
34718 + continue;
34719 + i = 0UL;
34720 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34721 + elf_dyn dyn;
34722 +
34723 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34724 + return;
34725 + if (dyn.d_tag == DT_NULL)
34726 + return;
34727 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34728 + gr_log_textrel(vma);
34729 + if (is_textrel_rw)
34730 + vma->vm_flags |= VM_MAYWRITE;
34731 + else
34732 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34733 + vma->vm_flags &= ~VM_MAYWRITE;
34734 + return;
34735 + }
34736 + i++;
34737 + }
34738 + return;
34739 +
34740 + case PT_GNU_RELRO:
34741 + if (!is_relro)
34742 + continue;
34743 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34744 + vma->vm_flags &= ~VM_MAYWRITE;
34745 + return;
34746 + }
34747 + }
34748 +}
34749 +#endif
34750 +
34751 static int __init init_elf_binfmt(void)
34752 {
34753 return register_binfmt(&elf_format);
34754 diff -urNp linux-2.6.39.4/fs/binfmt_flat.c linux-2.6.39.4/fs/binfmt_flat.c
34755 --- linux-2.6.39.4/fs/binfmt_flat.c 2011-05-19 00:06:34.000000000 -0400
34756 +++ linux-2.6.39.4/fs/binfmt_flat.c 2011-08-05 19:44:37.000000000 -0400
34757 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34758 realdatastart = (unsigned long) -ENOMEM;
34759 printk("Unable to allocate RAM for process data, errno %d\n",
34760 (int)-realdatastart);
34761 + down_write(&current->mm->mmap_sem);
34762 do_munmap(current->mm, textpos, text_len);
34763 + up_write(&current->mm->mmap_sem);
34764 ret = realdatastart;
34765 goto err;
34766 }
34767 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34768 }
34769 if (IS_ERR_VALUE(result)) {
34770 printk("Unable to read data+bss, errno %d\n", (int)-result);
34771 + down_write(&current->mm->mmap_sem);
34772 do_munmap(current->mm, textpos, text_len);
34773 do_munmap(current->mm, realdatastart, len);
34774 + up_write(&current->mm->mmap_sem);
34775 ret = result;
34776 goto err;
34777 }
34778 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34779 }
34780 if (IS_ERR_VALUE(result)) {
34781 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34782 + down_write(&current->mm->mmap_sem);
34783 do_munmap(current->mm, textpos, text_len + data_len + extra +
34784 MAX_SHARED_LIBS * sizeof(unsigned long));
34785 + up_write(&current->mm->mmap_sem);
34786 ret = result;
34787 goto err;
34788 }
34789 diff -urNp linux-2.6.39.4/fs/bio.c linux-2.6.39.4/fs/bio.c
34790 --- linux-2.6.39.4/fs/bio.c 2011-05-19 00:06:34.000000000 -0400
34791 +++ linux-2.6.39.4/fs/bio.c 2011-08-05 19:44:37.000000000 -0400
34792 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34793 const int read = bio_data_dir(bio) == READ;
34794 struct bio_map_data *bmd = bio->bi_private;
34795 int i;
34796 - char *p = bmd->sgvecs[0].iov_base;
34797 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34798
34799 __bio_for_each_segment(bvec, bio, i, 0) {
34800 char *addr = page_address(bvec->bv_page);
34801 diff -urNp linux-2.6.39.4/fs/block_dev.c linux-2.6.39.4/fs/block_dev.c
34802 --- linux-2.6.39.4/fs/block_dev.c 2011-07-09 09:18:51.000000000 -0400
34803 +++ linux-2.6.39.4/fs/block_dev.c 2011-08-05 19:44:37.000000000 -0400
34804 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34805 else if (bdev->bd_contains == bdev)
34806 return true; /* is a whole device which isn't held */
34807
34808 - else if (whole->bd_holder == bd_may_claim)
34809 + else if (whole->bd_holder == (void *)bd_may_claim)
34810 return true; /* is a partition of a device that is being partitioned */
34811 else if (whole->bd_holder != NULL)
34812 return false; /* is a partition of a held device */
34813 diff -urNp linux-2.6.39.4/fs/btrfs/ctree.c linux-2.6.39.4/fs/btrfs/ctree.c
34814 --- linux-2.6.39.4/fs/btrfs/ctree.c 2011-05-19 00:06:34.000000000 -0400
34815 +++ linux-2.6.39.4/fs/btrfs/ctree.c 2011-08-05 19:44:37.000000000 -0400
34816 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
34817 free_extent_buffer(buf);
34818 add_root_to_dirty_list(root);
34819 } else {
34820 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34821 - parent_start = parent->start;
34822 - else
34823 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34824 + if (parent)
34825 + parent_start = parent->start;
34826 + else
34827 + parent_start = 0;
34828 + } else
34829 parent_start = 0;
34830
34831 WARN_ON(trans->transid != btrfs_header_generation(parent));
34832 @@ -3647,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
34833
34834 ret = 0;
34835 if (slot == 0) {
34836 - struct btrfs_disk_key disk_key;
34837 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
34838 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
34839 }
34840 diff -urNp linux-2.6.39.4/fs/btrfs/free-space-cache.c linux-2.6.39.4/fs/btrfs/free-space-cache.c
34841 --- linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-05-19 00:06:34.000000000 -0400
34842 +++ linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-08-05 19:44:37.000000000 -0400
34843 @@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34844 while(1) {
34845 if (entry->bytes < bytes ||
34846 (!entry->bitmap && entry->offset < min_start)) {
34847 - struct rb_node *node;
34848 -
34849 node = rb_next(&entry->offset_index);
34850 if (!node)
34851 break;
34852 @@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34853 cluster, entry, bytes,
34854 min_start);
34855 if (ret == 0) {
34856 - struct rb_node *node;
34857 node = rb_next(&entry->offset_index);
34858 if (!node)
34859 break;
34860 diff -urNp linux-2.6.39.4/fs/btrfs/inode.c linux-2.6.39.4/fs/btrfs/inode.c
34861 --- linux-2.6.39.4/fs/btrfs/inode.c 2011-05-19 00:06:34.000000000 -0400
34862 +++ linux-2.6.39.4/fs/btrfs/inode.c 2011-08-05 20:34:06.000000000 -0400
34863 @@ -6947,7 +6947,7 @@ fail:
34864 return -ENOMEM;
34865 }
34866
34867 -static int btrfs_getattr(struct vfsmount *mnt,
34868 +int btrfs_getattr(struct vfsmount *mnt,
34869 struct dentry *dentry, struct kstat *stat)
34870 {
34871 struct inode *inode = dentry->d_inode;
34872 @@ -6959,6 +6959,14 @@ static int btrfs_getattr(struct vfsmount
34873 return 0;
34874 }
34875
34876 +EXPORT_SYMBOL(btrfs_getattr);
34877 +
34878 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34879 +{
34880 + return BTRFS_I(inode)->root->anon_super.s_dev;
34881 +}
34882 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34883 +
34884 /*
34885 * If a file is moved, it will inherit the cow and compression flags of the new
34886 * directory.
34887 diff -urNp linux-2.6.39.4/fs/btrfs/ioctl.c linux-2.6.39.4/fs/btrfs/ioctl.c
34888 --- linux-2.6.39.4/fs/btrfs/ioctl.c 2011-05-19 00:06:34.000000000 -0400
34889 +++ linux-2.6.39.4/fs/btrfs/ioctl.c 2011-08-05 19:44:37.000000000 -0400
34890 @@ -2361,9 +2361,12 @@ long btrfs_ioctl_space_info(struct btrfs
34891 for (i = 0; i < num_types; i++) {
34892 struct btrfs_space_info *tmp;
34893
34894 + /* Don't copy in more than we allocated */
34895 if (!slot_count)
34896 break;
34897
34898 + slot_count--;
34899 +
34900 info = NULL;
34901 rcu_read_lock();
34902 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34903 @@ -2385,10 +2388,7 @@ long btrfs_ioctl_space_info(struct btrfs
34904 memcpy(dest, &space, sizeof(space));
34905 dest++;
34906 space_args.total_spaces++;
34907 - slot_count--;
34908 }
34909 - if (!slot_count)
34910 - break;
34911 }
34912 up_read(&info->groups_sem);
34913 }
34914 diff -urNp linux-2.6.39.4/fs/btrfs/relocation.c linux-2.6.39.4/fs/btrfs/relocation.c
34915 --- linux-2.6.39.4/fs/btrfs/relocation.c 2011-05-19 00:06:34.000000000 -0400
34916 +++ linux-2.6.39.4/fs/btrfs/relocation.c 2011-08-05 19:44:37.000000000 -0400
34917 @@ -1239,7 +1239,7 @@ static int __update_reloc_root(struct bt
34918 }
34919 spin_unlock(&rc->reloc_root_tree.lock);
34920
34921 - BUG_ON((struct btrfs_root *)node->data != root);
34922 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34923
34924 if (!del) {
34925 spin_lock(&rc->reloc_root_tree.lock);
34926 diff -urNp linux-2.6.39.4/fs/cachefiles/bind.c linux-2.6.39.4/fs/cachefiles/bind.c
34927 --- linux-2.6.39.4/fs/cachefiles/bind.c 2011-05-19 00:06:34.000000000 -0400
34928 +++ linux-2.6.39.4/fs/cachefiles/bind.c 2011-08-05 19:44:37.000000000 -0400
34929 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
34930 args);
34931
34932 /* start by checking things over */
34933 - ASSERT(cache->fstop_percent >= 0 &&
34934 - cache->fstop_percent < cache->fcull_percent &&
34935 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
34936 cache->fcull_percent < cache->frun_percent &&
34937 cache->frun_percent < 100);
34938
34939 - ASSERT(cache->bstop_percent >= 0 &&
34940 - cache->bstop_percent < cache->bcull_percent &&
34941 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
34942 cache->bcull_percent < cache->brun_percent &&
34943 cache->brun_percent < 100);
34944
34945 diff -urNp linux-2.6.39.4/fs/cachefiles/daemon.c linux-2.6.39.4/fs/cachefiles/daemon.c
34946 --- linux-2.6.39.4/fs/cachefiles/daemon.c 2011-05-19 00:06:34.000000000 -0400
34947 +++ linux-2.6.39.4/fs/cachefiles/daemon.c 2011-08-05 19:44:37.000000000 -0400
34948 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
34949 if (n > buflen)
34950 return -EMSGSIZE;
34951
34952 - if (copy_to_user(_buffer, buffer, n) != 0)
34953 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
34954 return -EFAULT;
34955
34956 return n;
34957 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
34958 if (test_bit(CACHEFILES_DEAD, &cache->flags))
34959 return -EIO;
34960
34961 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
34962 + if (datalen > PAGE_SIZE - 1)
34963 return -EOPNOTSUPP;
34964
34965 /* drag the command string into the kernel so we can parse it */
34966 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
34967 if (args[0] != '%' || args[1] != '\0')
34968 return -EINVAL;
34969
34970 - if (fstop < 0 || fstop >= cache->fcull_percent)
34971 + if (fstop >= cache->fcull_percent)
34972 return cachefiles_daemon_range_error(cache, args);
34973
34974 cache->fstop_percent = fstop;
34975 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
34976 if (args[0] != '%' || args[1] != '\0')
34977 return -EINVAL;
34978
34979 - if (bstop < 0 || bstop >= cache->bcull_percent)
34980 + if (bstop >= cache->bcull_percent)
34981 return cachefiles_daemon_range_error(cache, args);
34982
34983 cache->bstop_percent = bstop;
34984 diff -urNp linux-2.6.39.4/fs/cachefiles/internal.h linux-2.6.39.4/fs/cachefiles/internal.h
34985 --- linux-2.6.39.4/fs/cachefiles/internal.h 2011-05-19 00:06:34.000000000 -0400
34986 +++ linux-2.6.39.4/fs/cachefiles/internal.h 2011-08-05 19:44:37.000000000 -0400
34987 @@ -57,7 +57,7 @@ struct cachefiles_cache {
34988 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
34989 struct rb_root active_nodes; /* active nodes (can't be culled) */
34990 rwlock_t active_lock; /* lock for active_nodes */
34991 - atomic_t gravecounter; /* graveyard uniquifier */
34992 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
34993 unsigned frun_percent; /* when to stop culling (% files) */
34994 unsigned fcull_percent; /* when to start culling (% files) */
34995 unsigned fstop_percent; /* when to stop allocating (% files) */
34996 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
34997 * proc.c
34998 */
34999 #ifdef CONFIG_CACHEFILES_HISTOGRAM
35000 -extern atomic_t cachefiles_lookup_histogram[HZ];
35001 -extern atomic_t cachefiles_mkdir_histogram[HZ];
35002 -extern atomic_t cachefiles_create_histogram[HZ];
35003 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35004 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35005 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
35006
35007 extern int __init cachefiles_proc_init(void);
35008 extern void cachefiles_proc_cleanup(void);
35009 static inline
35010 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
35011 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
35012 {
35013 unsigned long jif = jiffies - start_jif;
35014 if (jif >= HZ)
35015 jif = HZ - 1;
35016 - atomic_inc(&histogram[jif]);
35017 + atomic_inc_unchecked(&histogram[jif]);
35018 }
35019
35020 #else
35021 diff -urNp linux-2.6.39.4/fs/cachefiles/namei.c linux-2.6.39.4/fs/cachefiles/namei.c
35022 --- linux-2.6.39.4/fs/cachefiles/namei.c 2011-05-19 00:06:34.000000000 -0400
35023 +++ linux-2.6.39.4/fs/cachefiles/namei.c 2011-08-05 19:44:37.000000000 -0400
35024 @@ -318,7 +318,7 @@ try_again:
35025 /* first step is to make up a grave dentry in the graveyard */
35026 sprintf(nbuffer, "%08x%08x",
35027 (uint32_t) get_seconds(),
35028 - (uint32_t) atomic_inc_return(&cache->gravecounter));
35029 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
35030
35031 /* do the multiway lock magic */
35032 trap = lock_rename(cache->graveyard, dir);
35033 diff -urNp linux-2.6.39.4/fs/cachefiles/proc.c linux-2.6.39.4/fs/cachefiles/proc.c
35034 --- linux-2.6.39.4/fs/cachefiles/proc.c 2011-05-19 00:06:34.000000000 -0400
35035 +++ linux-2.6.39.4/fs/cachefiles/proc.c 2011-08-05 19:44:37.000000000 -0400
35036 @@ -14,9 +14,9 @@
35037 #include <linux/seq_file.h>
35038 #include "internal.h"
35039
35040 -atomic_t cachefiles_lookup_histogram[HZ];
35041 -atomic_t cachefiles_mkdir_histogram[HZ];
35042 -atomic_t cachefiles_create_histogram[HZ];
35043 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35044 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35045 +atomic_unchecked_t cachefiles_create_histogram[HZ];
35046
35047 /*
35048 * display the latency histogram
35049 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
35050 return 0;
35051 default:
35052 index = (unsigned long) v - 3;
35053 - x = atomic_read(&cachefiles_lookup_histogram[index]);
35054 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
35055 - z = atomic_read(&cachefiles_create_histogram[index]);
35056 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
35057 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
35058 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
35059 if (x == 0 && y == 0 && z == 0)
35060 return 0;
35061
35062 diff -urNp linux-2.6.39.4/fs/cachefiles/rdwr.c linux-2.6.39.4/fs/cachefiles/rdwr.c
35063 --- linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-05-19 00:06:34.000000000 -0400
35064 +++ linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-08-05 19:44:37.000000000 -0400
35065 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
35066 old_fs = get_fs();
35067 set_fs(KERNEL_DS);
35068 ret = file->f_op->write(
35069 - file, (const void __user *) data, len, &pos);
35070 + file, (__force const void __user *) data, len, &pos);
35071 set_fs(old_fs);
35072 kunmap(page);
35073 if (ret != len)
35074 diff -urNp linux-2.6.39.4/fs/ceph/dir.c linux-2.6.39.4/fs/ceph/dir.c
35075 --- linux-2.6.39.4/fs/ceph/dir.c 2011-05-19 00:06:34.000000000 -0400
35076 +++ linux-2.6.39.4/fs/ceph/dir.c 2011-08-05 19:44:37.000000000 -0400
35077 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35078 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35079 struct ceph_mds_client *mdsc = fsc->mdsc;
35080 unsigned frag = fpos_frag(filp->f_pos);
35081 - int off = fpos_off(filp->f_pos);
35082 + unsigned int off = fpos_off(filp->f_pos);
35083 int err;
35084 u32 ftype;
35085 struct ceph_mds_reply_info_parsed *rinfo;
35086 @@ -360,7 +360,7 @@ more:
35087 rinfo = &fi->last_readdir->r_reply_info;
35088 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
35089 rinfo->dir_nr, off, fi->offset);
35090 - while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
35091 + while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
35092 u64 pos = ceph_make_fpos(frag, off);
35093 struct ceph_mds_reply_inode *in =
35094 rinfo->dir_in[off - fi->offset].in;
35095 diff -urNp linux-2.6.39.4/fs/cifs/cifs_debug.c linux-2.6.39.4/fs/cifs/cifs_debug.c
35096 --- linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-05-19 00:06:34.000000000 -0400
35097 +++ linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-08-05 19:44:37.000000000 -0400
35098 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35099 tcon = list_entry(tmp3,
35100 struct cifsTconInfo,
35101 tcon_list);
35102 - atomic_set(&tcon->num_smbs_sent, 0);
35103 - atomic_set(&tcon->num_writes, 0);
35104 - atomic_set(&tcon->num_reads, 0);
35105 - atomic_set(&tcon->num_oplock_brks, 0);
35106 - atomic_set(&tcon->num_opens, 0);
35107 - atomic_set(&tcon->num_posixopens, 0);
35108 - atomic_set(&tcon->num_posixmkdirs, 0);
35109 - atomic_set(&tcon->num_closes, 0);
35110 - atomic_set(&tcon->num_deletes, 0);
35111 - atomic_set(&tcon->num_mkdirs, 0);
35112 - atomic_set(&tcon->num_rmdirs, 0);
35113 - atomic_set(&tcon->num_renames, 0);
35114 - atomic_set(&tcon->num_t2renames, 0);
35115 - atomic_set(&tcon->num_ffirst, 0);
35116 - atomic_set(&tcon->num_fnext, 0);
35117 - atomic_set(&tcon->num_fclose, 0);
35118 - atomic_set(&tcon->num_hardlinks, 0);
35119 - atomic_set(&tcon->num_symlinks, 0);
35120 - atomic_set(&tcon->num_locks, 0);
35121 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35122 + atomic_set_unchecked(&tcon->num_writes, 0);
35123 + atomic_set_unchecked(&tcon->num_reads, 0);
35124 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35125 + atomic_set_unchecked(&tcon->num_opens, 0);
35126 + atomic_set_unchecked(&tcon->num_posixopens, 0);
35127 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35128 + atomic_set_unchecked(&tcon->num_closes, 0);
35129 + atomic_set_unchecked(&tcon->num_deletes, 0);
35130 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
35131 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
35132 + atomic_set_unchecked(&tcon->num_renames, 0);
35133 + atomic_set_unchecked(&tcon->num_t2renames, 0);
35134 + atomic_set_unchecked(&tcon->num_ffirst, 0);
35135 + atomic_set_unchecked(&tcon->num_fnext, 0);
35136 + atomic_set_unchecked(&tcon->num_fclose, 0);
35137 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
35138 + atomic_set_unchecked(&tcon->num_symlinks, 0);
35139 + atomic_set_unchecked(&tcon->num_locks, 0);
35140 }
35141 }
35142 }
35143 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35144 if (tcon->need_reconnect)
35145 seq_puts(m, "\tDISCONNECTED ");
35146 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35147 - atomic_read(&tcon->num_smbs_sent),
35148 - atomic_read(&tcon->num_oplock_brks));
35149 + atomic_read_unchecked(&tcon->num_smbs_sent),
35150 + atomic_read_unchecked(&tcon->num_oplock_brks));
35151 seq_printf(m, "\nReads: %d Bytes: %lld",
35152 - atomic_read(&tcon->num_reads),
35153 + atomic_read_unchecked(&tcon->num_reads),
35154 (long long)(tcon->bytes_read));
35155 seq_printf(m, "\nWrites: %d Bytes: %lld",
35156 - atomic_read(&tcon->num_writes),
35157 + atomic_read_unchecked(&tcon->num_writes),
35158 (long long)(tcon->bytes_written));
35159 seq_printf(m, "\nFlushes: %d",
35160 - atomic_read(&tcon->num_flushes));
35161 + atomic_read_unchecked(&tcon->num_flushes));
35162 seq_printf(m, "\nLocks: %d HardLinks: %d "
35163 "Symlinks: %d",
35164 - atomic_read(&tcon->num_locks),
35165 - atomic_read(&tcon->num_hardlinks),
35166 - atomic_read(&tcon->num_symlinks));
35167 + atomic_read_unchecked(&tcon->num_locks),
35168 + atomic_read_unchecked(&tcon->num_hardlinks),
35169 + atomic_read_unchecked(&tcon->num_symlinks));
35170 seq_printf(m, "\nOpens: %d Closes: %d "
35171 "Deletes: %d",
35172 - atomic_read(&tcon->num_opens),
35173 - atomic_read(&tcon->num_closes),
35174 - atomic_read(&tcon->num_deletes));
35175 + atomic_read_unchecked(&tcon->num_opens),
35176 + atomic_read_unchecked(&tcon->num_closes),
35177 + atomic_read_unchecked(&tcon->num_deletes));
35178 seq_printf(m, "\nPosix Opens: %d "
35179 "Posix Mkdirs: %d",
35180 - atomic_read(&tcon->num_posixopens),
35181 - atomic_read(&tcon->num_posixmkdirs));
35182 + atomic_read_unchecked(&tcon->num_posixopens),
35183 + atomic_read_unchecked(&tcon->num_posixmkdirs));
35184 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35185 - atomic_read(&tcon->num_mkdirs),
35186 - atomic_read(&tcon->num_rmdirs));
35187 + atomic_read_unchecked(&tcon->num_mkdirs),
35188 + atomic_read_unchecked(&tcon->num_rmdirs));
35189 seq_printf(m, "\nRenames: %d T2 Renames %d",
35190 - atomic_read(&tcon->num_renames),
35191 - atomic_read(&tcon->num_t2renames));
35192 + atomic_read_unchecked(&tcon->num_renames),
35193 + atomic_read_unchecked(&tcon->num_t2renames));
35194 seq_printf(m, "\nFindFirst: %d FNext %d "
35195 "FClose %d",
35196 - atomic_read(&tcon->num_ffirst),
35197 - atomic_read(&tcon->num_fnext),
35198 - atomic_read(&tcon->num_fclose));
35199 + atomic_read_unchecked(&tcon->num_ffirst),
35200 + atomic_read_unchecked(&tcon->num_fnext),
35201 + atomic_read_unchecked(&tcon->num_fclose));
35202 }
35203 }
35204 }
35205 diff -urNp linux-2.6.39.4/fs/cifs/cifsglob.h linux-2.6.39.4/fs/cifs/cifsglob.h
35206 --- linux-2.6.39.4/fs/cifs/cifsglob.h 2011-05-19 00:06:34.000000000 -0400
35207 +++ linux-2.6.39.4/fs/cifs/cifsglob.h 2011-08-05 19:44:37.000000000 -0400
35208 @@ -305,28 +305,28 @@ struct cifsTconInfo {
35209 __u16 Flags; /* optional support bits */
35210 enum statusEnum tidStatus;
35211 #ifdef CONFIG_CIFS_STATS
35212 - atomic_t num_smbs_sent;
35213 - atomic_t num_writes;
35214 - atomic_t num_reads;
35215 - atomic_t num_flushes;
35216 - atomic_t num_oplock_brks;
35217 - atomic_t num_opens;
35218 - atomic_t num_closes;
35219 - atomic_t num_deletes;
35220 - atomic_t num_mkdirs;
35221 - atomic_t num_posixopens;
35222 - atomic_t num_posixmkdirs;
35223 - atomic_t num_rmdirs;
35224 - atomic_t num_renames;
35225 - atomic_t num_t2renames;
35226 - atomic_t num_ffirst;
35227 - atomic_t num_fnext;
35228 - atomic_t num_fclose;
35229 - atomic_t num_hardlinks;
35230 - atomic_t num_symlinks;
35231 - atomic_t num_locks;
35232 - atomic_t num_acl_get;
35233 - atomic_t num_acl_set;
35234 + atomic_unchecked_t num_smbs_sent;
35235 + atomic_unchecked_t num_writes;
35236 + atomic_unchecked_t num_reads;
35237 + atomic_unchecked_t num_flushes;
35238 + atomic_unchecked_t num_oplock_brks;
35239 + atomic_unchecked_t num_opens;
35240 + atomic_unchecked_t num_closes;
35241 + atomic_unchecked_t num_deletes;
35242 + atomic_unchecked_t num_mkdirs;
35243 + atomic_unchecked_t num_posixopens;
35244 + atomic_unchecked_t num_posixmkdirs;
35245 + atomic_unchecked_t num_rmdirs;
35246 + atomic_unchecked_t num_renames;
35247 + atomic_unchecked_t num_t2renames;
35248 + atomic_unchecked_t num_ffirst;
35249 + atomic_unchecked_t num_fnext;
35250 + atomic_unchecked_t num_fclose;
35251 + atomic_unchecked_t num_hardlinks;
35252 + atomic_unchecked_t num_symlinks;
35253 + atomic_unchecked_t num_locks;
35254 + atomic_unchecked_t num_acl_get;
35255 + atomic_unchecked_t num_acl_set;
35256 #ifdef CONFIG_CIFS_STATS2
35257 unsigned long long time_writes;
35258 unsigned long long time_reads;
35259 @@ -509,7 +509,7 @@ static inline char CIFS_DIR_SEP(const st
35260 }
35261
35262 #ifdef CONFIG_CIFS_STATS
35263 -#define cifs_stats_inc atomic_inc
35264 +#define cifs_stats_inc atomic_inc_unchecked
35265
35266 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
35267 unsigned int bytes)
35268 diff -urNp linux-2.6.39.4/fs/cifs/link.c linux-2.6.39.4/fs/cifs/link.c
35269 --- linux-2.6.39.4/fs/cifs/link.c 2011-05-19 00:06:34.000000000 -0400
35270 +++ linux-2.6.39.4/fs/cifs/link.c 2011-08-05 19:44:37.000000000 -0400
35271 @@ -577,7 +577,7 @@ symlink_exit:
35272
35273 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35274 {
35275 - char *p = nd_get_link(nd);
35276 + const char *p = nd_get_link(nd);
35277 if (!IS_ERR(p))
35278 kfree(p);
35279 }
35280 diff -urNp linux-2.6.39.4/fs/coda/cache.c linux-2.6.39.4/fs/coda/cache.c
35281 --- linux-2.6.39.4/fs/coda/cache.c 2011-05-19 00:06:34.000000000 -0400
35282 +++ linux-2.6.39.4/fs/coda/cache.c 2011-08-05 19:44:37.000000000 -0400
35283 @@ -24,7 +24,7 @@
35284 #include "coda_linux.h"
35285 #include "coda_cache.h"
35286
35287 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35288 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35289
35290 /* replace or extend an acl cache hit */
35291 void coda_cache_enter(struct inode *inode, int mask)
35292 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35293 struct coda_inode_info *cii = ITOC(inode);
35294
35295 spin_lock(&cii->c_lock);
35296 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35297 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35298 if (cii->c_uid != current_fsuid()) {
35299 cii->c_uid = current_fsuid();
35300 cii->c_cached_perm = mask;
35301 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35302 {
35303 struct coda_inode_info *cii = ITOC(inode);
35304 spin_lock(&cii->c_lock);
35305 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35306 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35307 spin_unlock(&cii->c_lock);
35308 }
35309
35310 /* remove all acl caches */
35311 void coda_cache_clear_all(struct super_block *sb)
35312 {
35313 - atomic_inc(&permission_epoch);
35314 + atomic_inc_unchecked(&permission_epoch);
35315 }
35316
35317
35318 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35319 spin_lock(&cii->c_lock);
35320 hit = (mask & cii->c_cached_perm) == mask &&
35321 cii->c_uid == current_fsuid() &&
35322 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35323 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35324 spin_unlock(&cii->c_lock);
35325
35326 return hit;
35327 diff -urNp linux-2.6.39.4/fs/compat_binfmt_elf.c linux-2.6.39.4/fs/compat_binfmt_elf.c
35328 --- linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
35329 +++ linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
35330 @@ -30,11 +30,13 @@
35331 #undef elf_phdr
35332 #undef elf_shdr
35333 #undef elf_note
35334 +#undef elf_dyn
35335 #undef elf_addr_t
35336 #define elfhdr elf32_hdr
35337 #define elf_phdr elf32_phdr
35338 #define elf_shdr elf32_shdr
35339 #define elf_note elf32_note
35340 +#define elf_dyn Elf32_Dyn
35341 #define elf_addr_t Elf32_Addr
35342
35343 /*
35344 diff -urNp linux-2.6.39.4/fs/compat.c linux-2.6.39.4/fs/compat.c
35345 --- linux-2.6.39.4/fs/compat.c 2011-05-19 00:06:34.000000000 -0400
35346 +++ linux-2.6.39.4/fs/compat.c 2011-08-05 19:44:37.000000000 -0400
35347 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35348 goto out;
35349
35350 ret = -EINVAL;
35351 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35352 + if (nr_segs > UIO_MAXIOV)
35353 goto out;
35354 if (nr_segs > fast_segs) {
35355 ret = -ENOMEM;
35356 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35357
35358 struct compat_readdir_callback {
35359 struct compat_old_linux_dirent __user *dirent;
35360 + struct file * file;
35361 int result;
35362 };
35363
35364 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35365 buf->result = -EOVERFLOW;
35366 return -EOVERFLOW;
35367 }
35368 +
35369 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35370 + return 0;
35371 +
35372 buf->result++;
35373 dirent = buf->dirent;
35374 if (!access_ok(VERIFY_WRITE, dirent,
35375 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35376
35377 buf.result = 0;
35378 buf.dirent = dirent;
35379 + buf.file = file;
35380
35381 error = vfs_readdir(file, compat_fillonedir, &buf);
35382 if (buf.result)
35383 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35384 struct compat_getdents_callback {
35385 struct compat_linux_dirent __user *current_dir;
35386 struct compat_linux_dirent __user *previous;
35387 + struct file * file;
35388 int count;
35389 int error;
35390 };
35391 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35392 buf->error = -EOVERFLOW;
35393 return -EOVERFLOW;
35394 }
35395 +
35396 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35397 + return 0;
35398 +
35399 dirent = buf->previous;
35400 if (dirent) {
35401 if (__put_user(offset, &dirent->d_off))
35402 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35403 buf.previous = NULL;
35404 buf.count = count;
35405 buf.error = 0;
35406 + buf.file = file;
35407
35408 error = vfs_readdir(file, compat_filldir, &buf);
35409 if (error >= 0)
35410 @@ -1006,6 +1018,7 @@ out:
35411 struct compat_getdents_callback64 {
35412 struct linux_dirent64 __user *current_dir;
35413 struct linux_dirent64 __user *previous;
35414 + struct file * file;
35415 int count;
35416 int error;
35417 };
35418 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35419 buf->error = -EINVAL; /* only used if we fail.. */
35420 if (reclen > buf->count)
35421 return -EINVAL;
35422 +
35423 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35424 + return 0;
35425 +
35426 dirent = buf->previous;
35427
35428 if (dirent) {
35429 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35430 buf.previous = NULL;
35431 buf.count = count;
35432 buf.error = 0;
35433 + buf.file = file;
35434
35435 error = vfs_readdir(file, compat_filldir64, &buf);
35436 if (error >= 0)
35437 @@ -1436,6 +1454,11 @@ int compat_do_execve(char * filename,
35438 compat_uptr_t __user *envp,
35439 struct pt_regs * regs)
35440 {
35441 +#ifdef CONFIG_GRKERNSEC
35442 + struct file *old_exec_file;
35443 + struct acl_subject_label *old_acl;
35444 + struct rlimit old_rlim[RLIM_NLIMITS];
35445 +#endif
35446 struct linux_binprm *bprm;
35447 struct file *file;
35448 struct files_struct *displaced;
35449 @@ -1472,6 +1495,19 @@ int compat_do_execve(char * filename,
35450 bprm->filename = filename;
35451 bprm->interp = filename;
35452
35453 + if (gr_process_user_ban()) {
35454 + retval = -EPERM;
35455 + goto out_file;
35456 + }
35457 +
35458 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35459 + retval = -EAGAIN;
35460 + if (gr_handle_nproc())
35461 + goto out_file;
35462 + retval = -EACCES;
35463 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
35464 + goto out_file;
35465 +
35466 retval = bprm_mm_init(bprm);
35467 if (retval)
35468 goto out_file;
35469 @@ -1501,9 +1537,40 @@ int compat_do_execve(char * filename,
35470 if (retval < 0)
35471 goto out;
35472
35473 + if (!gr_tpe_allow(file)) {
35474 + retval = -EACCES;
35475 + goto out;
35476 + }
35477 +
35478 + if (gr_check_crash_exec(file)) {
35479 + retval = -EACCES;
35480 + goto out;
35481 + }
35482 +
35483 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35484 +
35485 + gr_handle_exec_args_compat(bprm, argv);
35486 +
35487 +#ifdef CONFIG_GRKERNSEC
35488 + old_acl = current->acl;
35489 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35490 + old_exec_file = current->exec_file;
35491 + get_file(file);
35492 + current->exec_file = file;
35493 +#endif
35494 +
35495 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35496 + bprm->unsafe & LSM_UNSAFE_SHARE);
35497 + if (retval < 0)
35498 + goto out_fail;
35499 +
35500 retval = search_binary_handler(bprm, regs);
35501 if (retval < 0)
35502 - goto out;
35503 + goto out_fail;
35504 +#ifdef CONFIG_GRKERNSEC
35505 + if (old_exec_file)
35506 + fput(old_exec_file);
35507 +#endif
35508
35509 /* execve succeeded */
35510 current->fs->in_exec = 0;
35511 @@ -1514,6 +1581,14 @@ int compat_do_execve(char * filename,
35512 put_files_struct(displaced);
35513 return retval;
35514
35515 +out_fail:
35516 +#ifdef CONFIG_GRKERNSEC
35517 + current->acl = old_acl;
35518 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35519 + fput(current->exec_file);
35520 + current->exec_file = old_exec_file;
35521 +#endif
35522 +
35523 out:
35524 if (bprm->mm) {
35525 acct_arg_size(bprm, 0);
35526 @@ -1681,6 +1756,8 @@ int compat_core_sys_select(int n, compat
35527 struct fdtable *fdt;
35528 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35529
35530 + pax_track_stack();
35531 +
35532 if (n < 0)
35533 goto out_nofds;
35534
35535 diff -urNp linux-2.6.39.4/fs/compat_ioctl.c linux-2.6.39.4/fs/compat_ioctl.c
35536 --- linux-2.6.39.4/fs/compat_ioctl.c 2011-05-19 00:06:34.000000000 -0400
35537 +++ linux-2.6.39.4/fs/compat_ioctl.c 2011-08-05 19:44:37.000000000 -0400
35538 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35539
35540 err = get_user(palp, &up->palette);
35541 err |= get_user(length, &up->length);
35542 + if (err)
35543 + return -EFAULT;
35544
35545 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35546 err = put_user(compat_ptr(palp), &up_native->palette);
35547 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35548 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35549 {
35550 unsigned int a, b;
35551 - a = *(unsigned int *)p;
35552 - b = *(unsigned int *)q;
35553 + a = *(const unsigned int *)p;
35554 + b = *(const unsigned int *)q;
35555 if (a > b)
35556 return 1;
35557 if (a < b)
35558 diff -urNp linux-2.6.39.4/fs/configfs/dir.c linux-2.6.39.4/fs/configfs/dir.c
35559 --- linux-2.6.39.4/fs/configfs/dir.c 2011-05-19 00:06:34.000000000 -0400
35560 +++ linux-2.6.39.4/fs/configfs/dir.c 2011-08-05 19:44:37.000000000 -0400
35561 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35562 }
35563 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35564 struct configfs_dirent *next;
35565 - const char * name;
35566 + const unsigned char * name;
35567 + char d_name[sizeof(next->s_dentry->d_iname)];
35568 int len;
35569 struct inode *inode = NULL;
35570
35571 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35572 continue;
35573
35574 name = configfs_get_name(next);
35575 - len = strlen(name);
35576 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35577 + len = next->s_dentry->d_name.len;
35578 + memcpy(d_name, name, len);
35579 + name = d_name;
35580 + } else
35581 + len = strlen(name);
35582
35583 /*
35584 * We'll have a dentry and an inode for
35585 diff -urNp linux-2.6.39.4/fs/dcache.c linux-2.6.39.4/fs/dcache.c
35586 --- linux-2.6.39.4/fs/dcache.c 2011-05-19 00:06:34.000000000 -0400
35587 +++ linux-2.6.39.4/fs/dcache.c 2011-08-05 19:44:37.000000000 -0400
35588 @@ -3069,7 +3069,7 @@ void __init vfs_caches_init(unsigned lon
35589 mempages -= reserve;
35590
35591 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35592 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35593 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35594
35595 dcache_init();
35596 inode_init();
35597 diff -urNp linux-2.6.39.4/fs/ecryptfs/inode.c linux-2.6.39.4/fs/ecryptfs/inode.c
35598 --- linux-2.6.39.4/fs/ecryptfs/inode.c 2011-06-03 00:04:14.000000000 -0400
35599 +++ linux-2.6.39.4/fs/ecryptfs/inode.c 2011-08-05 19:44:37.000000000 -0400
35600 @@ -623,7 +623,7 @@ static int ecryptfs_readlink_lower(struc
35601 old_fs = get_fs();
35602 set_fs(get_ds());
35603 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35604 - (char __user *)lower_buf,
35605 + (__force char __user *)lower_buf,
35606 lower_bufsiz);
35607 set_fs(old_fs);
35608 if (rc < 0)
35609 @@ -669,7 +669,7 @@ static void *ecryptfs_follow_link(struct
35610 }
35611 old_fs = get_fs();
35612 set_fs(get_ds());
35613 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35614 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35615 set_fs(old_fs);
35616 if (rc < 0) {
35617 kfree(buf);
35618 @@ -684,7 +684,7 @@ out:
35619 static void
35620 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35621 {
35622 - char *buf = nd_get_link(nd);
35623 + const char *buf = nd_get_link(nd);
35624 if (!IS_ERR(buf)) {
35625 /* Free the char* */
35626 kfree(buf);
35627 diff -urNp linux-2.6.39.4/fs/ecryptfs/miscdev.c linux-2.6.39.4/fs/ecryptfs/miscdev.c
35628 --- linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-05-19 00:06:34.000000000 -0400
35629 +++ linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-08-05 19:44:37.000000000 -0400
35630 @@ -328,7 +328,7 @@ check_list:
35631 goto out_unlock_msg_ctx;
35632 i = 5;
35633 if (msg_ctx->msg) {
35634 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35635 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35636 goto out_unlock_msg_ctx;
35637 i += packet_length_size;
35638 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35639 diff -urNp linux-2.6.39.4/fs/exec.c linux-2.6.39.4/fs/exec.c
35640 --- linux-2.6.39.4/fs/exec.c 2011-06-25 12:55:23.000000000 -0400
35641 +++ linux-2.6.39.4/fs/exec.c 2011-08-05 19:44:37.000000000 -0400
35642 @@ -55,12 +55,24 @@
35643 #include <linux/fs_struct.h>
35644 #include <linux/pipe_fs_i.h>
35645 #include <linux/oom.h>
35646 +#include <linux/random.h>
35647 +#include <linux/seq_file.h>
35648 +
35649 +#ifdef CONFIG_PAX_REFCOUNT
35650 +#include <linux/kallsyms.h>
35651 +#include <linux/kdebug.h>
35652 +#endif
35653
35654 #include <asm/uaccess.h>
35655 #include <asm/mmu_context.h>
35656 #include <asm/tlb.h>
35657 #include "internal.h"
35658
35659 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35660 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35661 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35662 +#endif
35663 +
35664 int core_uses_pid;
35665 char core_pattern[CORENAME_MAX_SIZE] = "core";
35666 unsigned int core_pipe_limit;
35667 @@ -70,7 +82,7 @@ struct core_name {
35668 char *corename;
35669 int used, size;
35670 };
35671 -static atomic_t call_count = ATOMIC_INIT(1);
35672 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35673
35674 /* The maximal length of core_pattern is also specified in sysctl.c */
35675
35676 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35677 char *tmp = getname(library);
35678 int error = PTR_ERR(tmp);
35679 static const struct open_flags uselib_flags = {
35680 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35681 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35682 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35683 .intent = LOOKUP_OPEN
35684 };
35685 @@ -190,18 +202,10 @@ struct page *get_arg_page(struct linux_b
35686 int write)
35687 {
35688 struct page *page;
35689 - int ret;
35690
35691 -#ifdef CONFIG_STACK_GROWSUP
35692 - if (write) {
35693 - ret = expand_stack_downwards(bprm->vma, pos);
35694 - if (ret < 0)
35695 - return NULL;
35696 - }
35697 -#endif
35698 - ret = get_user_pages(current, bprm->mm, pos,
35699 - 1, write, 1, &page, NULL);
35700 - if (ret <= 0)
35701 + if (0 > expand_stack_downwards(bprm->vma, pos))
35702 + return NULL;
35703 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35704 return NULL;
35705
35706 if (write) {
35707 @@ -276,6 +280,11 @@ static int __bprm_mm_init(struct linux_b
35708 vma->vm_end = STACK_TOP_MAX;
35709 vma->vm_start = vma->vm_end - PAGE_SIZE;
35710 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35711 +
35712 +#ifdef CONFIG_PAX_SEGMEXEC
35713 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35714 +#endif
35715 +
35716 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35717 INIT_LIST_HEAD(&vma->anon_vma_chain);
35718
35719 @@ -290,6 +299,12 @@ static int __bprm_mm_init(struct linux_b
35720 mm->stack_vm = mm->total_vm = 1;
35721 up_write(&mm->mmap_sem);
35722 bprm->p = vma->vm_end - sizeof(void *);
35723 +
35724 +#ifdef CONFIG_PAX_RANDUSTACK
35725 + if (randomize_va_space)
35726 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35727 +#endif
35728 +
35729 return 0;
35730 err:
35731 up_write(&mm->mmap_sem);
35732 @@ -525,7 +540,7 @@ int copy_strings_kernel(int argc, const
35733 int r;
35734 mm_segment_t oldfs = get_fs();
35735 set_fs(KERNEL_DS);
35736 - r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
35737 + r = copy_strings(argc, (__force const char __user *const __user *)argv, bprm);
35738 set_fs(oldfs);
35739 return r;
35740 }
35741 @@ -555,7 +570,8 @@ static int shift_arg_pages(struct vm_are
35742 unsigned long new_end = old_end - shift;
35743 struct mmu_gather *tlb;
35744
35745 - BUG_ON(new_start > new_end);
35746 + if (new_start >= new_end || new_start < mmap_min_addr)
35747 + return -ENOMEM;
35748
35749 /*
35750 * ensure there are no vmas between where we want to go
35751 @@ -564,6 +580,10 @@ static int shift_arg_pages(struct vm_are
35752 if (vma != find_vma(mm, new_start))
35753 return -EFAULT;
35754
35755 +#ifdef CONFIG_PAX_SEGMEXEC
35756 + BUG_ON(pax_find_mirror_vma(vma));
35757 +#endif
35758 +
35759 /*
35760 * cover the whole range: [new_start, old_end)
35761 */
35762 @@ -644,10 +664,6 @@ int setup_arg_pages(struct linux_binprm
35763 stack_top = arch_align_stack(stack_top);
35764 stack_top = PAGE_ALIGN(stack_top);
35765
35766 - if (unlikely(stack_top < mmap_min_addr) ||
35767 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35768 - return -ENOMEM;
35769 -
35770 stack_shift = vma->vm_end - stack_top;
35771
35772 bprm->p -= stack_shift;
35773 @@ -659,8 +675,28 @@ int setup_arg_pages(struct linux_binprm
35774 bprm->exec -= stack_shift;
35775
35776 down_write(&mm->mmap_sem);
35777 +
35778 + /* Move stack pages down in memory. */
35779 + if (stack_shift) {
35780 + ret = shift_arg_pages(vma, stack_shift);
35781 + if (ret)
35782 + goto out_unlock;
35783 + }
35784 +
35785 vm_flags = VM_STACK_FLAGS;
35786
35787 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35788 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35789 + vm_flags &= ~VM_EXEC;
35790 +
35791 +#ifdef CONFIG_PAX_MPROTECT
35792 + if (mm->pax_flags & MF_PAX_MPROTECT)
35793 + vm_flags &= ~VM_MAYEXEC;
35794 +#endif
35795 +
35796 + }
35797 +#endif
35798 +
35799 /*
35800 * Adjust stack execute permissions; explicitly enable for
35801 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35802 @@ -679,13 +715,6 @@ int setup_arg_pages(struct linux_binprm
35803 goto out_unlock;
35804 BUG_ON(prev != vma);
35805
35806 - /* Move stack pages down in memory. */
35807 - if (stack_shift) {
35808 - ret = shift_arg_pages(vma, stack_shift);
35809 - if (ret)
35810 - goto out_unlock;
35811 - }
35812 -
35813 /* mprotect_fixup is overkill to remove the temporary stack flags */
35814 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35815
35816 @@ -725,7 +754,7 @@ struct file *open_exec(const char *name)
35817 struct file *file;
35818 int err;
35819 static const struct open_flags open_exec_flags = {
35820 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35821 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35822 .acc_mode = MAY_EXEC | MAY_OPEN,
35823 .intent = LOOKUP_OPEN
35824 };
35825 @@ -766,7 +795,7 @@ int kernel_read(struct file *file, loff_
35826 old_fs = get_fs();
35827 set_fs(get_ds());
35828 /* The cast to a user pointer is valid due to the set_fs() */
35829 - result = vfs_read(file, (void __user *)addr, count, &pos);
35830 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35831 set_fs(old_fs);
35832 return result;
35833 }
35834 @@ -1189,7 +1218,7 @@ int check_unsafe_exec(struct linux_binpr
35835 }
35836 rcu_read_unlock();
35837
35838 - if (p->fs->users > n_fs) {
35839 + if (atomic_read(&p->fs->users) > n_fs) {
35840 bprm->unsafe |= LSM_UNSAFE_SHARE;
35841 } else {
35842 res = -EAGAIN;
35843 @@ -1381,6 +1410,11 @@ int do_execve(const char * filename,
35844 const char __user *const __user *envp,
35845 struct pt_regs * regs)
35846 {
35847 +#ifdef CONFIG_GRKERNSEC
35848 + struct file *old_exec_file;
35849 + struct acl_subject_label *old_acl;
35850 + struct rlimit old_rlim[RLIM_NLIMITS];
35851 +#endif
35852 struct linux_binprm *bprm;
35853 struct file *file;
35854 struct files_struct *displaced;
35855 @@ -1417,6 +1451,23 @@ int do_execve(const char * filename,
35856 bprm->filename = filename;
35857 bprm->interp = filename;
35858
35859 + if (gr_process_user_ban()) {
35860 + retval = -EPERM;
35861 + goto out_file;
35862 + }
35863 +
35864 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35865 +
35866 + if (gr_handle_nproc()) {
35867 + retval = -EAGAIN;
35868 + goto out_file;
35869 + }
35870 +
35871 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35872 + retval = -EACCES;
35873 + goto out_file;
35874 + }
35875 +
35876 retval = bprm_mm_init(bprm);
35877 if (retval)
35878 goto out_file;
35879 @@ -1446,9 +1497,40 @@ int do_execve(const char * filename,
35880 if (retval < 0)
35881 goto out;
35882
35883 + if (!gr_tpe_allow(file)) {
35884 + retval = -EACCES;
35885 + goto out;
35886 + }
35887 +
35888 + if (gr_check_crash_exec(file)) {
35889 + retval = -EACCES;
35890 + goto out;
35891 + }
35892 +
35893 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35894 +
35895 + gr_handle_exec_args(bprm, argv);
35896 +
35897 +#ifdef CONFIG_GRKERNSEC
35898 + old_acl = current->acl;
35899 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35900 + old_exec_file = current->exec_file;
35901 + get_file(file);
35902 + current->exec_file = file;
35903 +#endif
35904 +
35905 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35906 + bprm->unsafe & LSM_UNSAFE_SHARE);
35907 + if (retval < 0)
35908 + goto out_fail;
35909 +
35910 retval = search_binary_handler(bprm,regs);
35911 if (retval < 0)
35912 - goto out;
35913 + goto out_fail;
35914 +#ifdef CONFIG_GRKERNSEC
35915 + if (old_exec_file)
35916 + fput(old_exec_file);
35917 +#endif
35918
35919 /* execve succeeded */
35920 current->fs->in_exec = 0;
35921 @@ -1459,6 +1541,14 @@ int do_execve(const char * filename,
35922 put_files_struct(displaced);
35923 return retval;
35924
35925 +out_fail:
35926 +#ifdef CONFIG_GRKERNSEC
35927 + current->acl = old_acl;
35928 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35929 + fput(current->exec_file);
35930 + current->exec_file = old_exec_file;
35931 +#endif
35932 +
35933 out:
35934 if (bprm->mm) {
35935 acct_arg_size(bprm, 0);
35936 @@ -1504,7 +1594,7 @@ static int expand_corename(struct core_n
35937 {
35938 char *old_corename = cn->corename;
35939
35940 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
35941 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
35942 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
35943
35944 if (!cn->corename) {
35945 @@ -1557,7 +1647,7 @@ static int format_corename(struct core_n
35946 int pid_in_pattern = 0;
35947 int err = 0;
35948
35949 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
35950 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
35951 cn->corename = kmalloc(cn->size, GFP_KERNEL);
35952 cn->used = 0;
35953
35954 @@ -1645,6 +1735,219 @@ out:
35955 return ispipe;
35956 }
35957
35958 +int pax_check_flags(unsigned long *flags)
35959 +{
35960 + int retval = 0;
35961 +
35962 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
35963 + if (*flags & MF_PAX_SEGMEXEC)
35964 + {
35965 + *flags &= ~MF_PAX_SEGMEXEC;
35966 + retval = -EINVAL;
35967 + }
35968 +#endif
35969 +
35970 + if ((*flags & MF_PAX_PAGEEXEC)
35971 +
35972 +#ifdef CONFIG_PAX_PAGEEXEC
35973 + && (*flags & MF_PAX_SEGMEXEC)
35974 +#endif
35975 +
35976 + )
35977 + {
35978 + *flags &= ~MF_PAX_PAGEEXEC;
35979 + retval = -EINVAL;
35980 + }
35981 +
35982 + if ((*flags & MF_PAX_MPROTECT)
35983 +
35984 +#ifdef CONFIG_PAX_MPROTECT
35985 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35986 +#endif
35987 +
35988 + )
35989 + {
35990 + *flags &= ~MF_PAX_MPROTECT;
35991 + retval = -EINVAL;
35992 + }
35993 +
35994 + if ((*flags & MF_PAX_EMUTRAMP)
35995 +
35996 +#ifdef CONFIG_PAX_EMUTRAMP
35997 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
35998 +#endif
35999 +
36000 + )
36001 + {
36002 + *flags &= ~MF_PAX_EMUTRAMP;
36003 + retval = -EINVAL;
36004 + }
36005 +
36006 + return retval;
36007 +}
36008 +
36009 +EXPORT_SYMBOL(pax_check_flags);
36010 +
36011 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36012 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
36013 +{
36014 + struct task_struct *tsk = current;
36015 + struct mm_struct *mm = current->mm;
36016 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
36017 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
36018 + char *path_exec = NULL;
36019 + char *path_fault = NULL;
36020 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
36021 +
36022 + if (buffer_exec && buffer_fault) {
36023 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
36024 +
36025 + down_read(&mm->mmap_sem);
36026 + vma = mm->mmap;
36027 + while (vma && (!vma_exec || !vma_fault)) {
36028 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
36029 + vma_exec = vma;
36030 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
36031 + vma_fault = vma;
36032 + vma = vma->vm_next;
36033 + }
36034 + if (vma_exec) {
36035 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
36036 + if (IS_ERR(path_exec))
36037 + path_exec = "<path too long>";
36038 + else {
36039 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
36040 + if (path_exec) {
36041 + *path_exec = 0;
36042 + path_exec = buffer_exec;
36043 + } else
36044 + path_exec = "<path too long>";
36045 + }
36046 + }
36047 + if (vma_fault) {
36048 + start = vma_fault->vm_start;
36049 + end = vma_fault->vm_end;
36050 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36051 + if (vma_fault->vm_file) {
36052 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36053 + if (IS_ERR(path_fault))
36054 + path_fault = "<path too long>";
36055 + else {
36056 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36057 + if (path_fault) {
36058 + *path_fault = 0;
36059 + path_fault = buffer_fault;
36060 + } else
36061 + path_fault = "<path too long>";
36062 + }
36063 + } else
36064 + path_fault = "<anonymous mapping>";
36065 + }
36066 + up_read(&mm->mmap_sem);
36067 + }
36068 + if (tsk->signal->curr_ip)
36069 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36070 + else
36071 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36072 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36073 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36074 + task_uid(tsk), task_euid(tsk), pc, sp);
36075 + free_page((unsigned long)buffer_exec);
36076 + free_page((unsigned long)buffer_fault);
36077 + pax_report_insns(pc, sp);
36078 + do_coredump(SIGKILL, SIGKILL, regs);
36079 +}
36080 +#endif
36081 +
36082 +#ifdef CONFIG_PAX_REFCOUNT
36083 +void pax_report_refcount_overflow(struct pt_regs *regs)
36084 +{
36085 + if (current->signal->curr_ip)
36086 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36087 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36088 + else
36089 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36090 + current->comm, task_pid_nr(current), current_uid(), current_euid());
36091 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36092 + show_regs(regs);
36093 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36094 +}
36095 +#endif
36096 +
36097 +#ifdef CONFIG_PAX_USERCOPY
36098 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36099 +int object_is_on_stack(const void *obj, unsigned long len)
36100 +{
36101 + const void * const stack = task_stack_page(current);
36102 + const void * const stackend = stack + THREAD_SIZE;
36103 +
36104 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36105 + const void *frame = NULL;
36106 + const void *oldframe;
36107 +#endif
36108 +
36109 + if (obj + len < obj)
36110 + return -1;
36111 +
36112 + if (obj + len <= stack || stackend <= obj)
36113 + return 0;
36114 +
36115 + if (obj < stack || stackend < obj + len)
36116 + return -1;
36117 +
36118 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36119 + oldframe = __builtin_frame_address(1);
36120 + if (oldframe)
36121 + frame = __builtin_frame_address(2);
36122 + /*
36123 + low ----------------------------------------------> high
36124 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
36125 + ^----------------^
36126 + allow copies only within here
36127 + */
36128 + while (stack <= frame && frame < stackend) {
36129 + /* if obj + len extends past the last frame, this
36130 + check won't pass and the next frame will be 0,
36131 + causing us to bail out and correctly report
36132 + the copy as invalid
36133 + */
36134 + if (obj + len <= frame)
36135 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36136 + oldframe = frame;
36137 + frame = *(const void * const *)frame;
36138 + }
36139 + return -1;
36140 +#else
36141 + return 1;
36142 +#endif
36143 +}
36144 +
36145 +
36146 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36147 +{
36148 + if (current->signal->curr_ip)
36149 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36150 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36151 + else
36152 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36153 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36154 + dump_stack();
36155 + gr_handle_kernel_exploit();
36156 + do_group_exit(SIGKILL);
36157 +}
36158 +#endif
36159 +
36160 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36161 +void pax_track_stack(void)
36162 +{
36163 + unsigned long sp = (unsigned long)&sp;
36164 + if (sp < current_thread_info()->lowest_stack &&
36165 + sp > (unsigned long)task_stack_page(current))
36166 + current_thread_info()->lowest_stack = sp;
36167 +}
36168 +EXPORT_SYMBOL(pax_track_stack);
36169 +#endif
36170 +
36171 static int zap_process(struct task_struct *start, int exit_code)
36172 {
36173 struct task_struct *t;
36174 @@ -1855,17 +2158,17 @@ static void wait_for_dump_helpers(struct
36175 pipe = file->f_path.dentry->d_inode->i_pipe;
36176
36177 pipe_lock(pipe);
36178 - pipe->readers++;
36179 - pipe->writers--;
36180 + atomic_inc(&pipe->readers);
36181 + atomic_dec(&pipe->writers);
36182
36183 - while ((pipe->readers > 1) && (!signal_pending(current))) {
36184 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36185 wake_up_interruptible_sync(&pipe->wait);
36186 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36187 pipe_wait(pipe);
36188 }
36189
36190 - pipe->readers--;
36191 - pipe->writers++;
36192 + atomic_dec(&pipe->readers);
36193 + atomic_inc(&pipe->writers);
36194 pipe_unlock(pipe);
36195
36196 }
36197 @@ -1926,7 +2229,7 @@ void do_coredump(long signr, int exit_co
36198 int retval = 0;
36199 int flag = 0;
36200 int ispipe;
36201 - static atomic_t core_dump_count = ATOMIC_INIT(0);
36202 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36203 struct coredump_params cprm = {
36204 .signr = signr,
36205 .regs = regs,
36206 @@ -1941,6 +2244,9 @@ void do_coredump(long signr, int exit_co
36207
36208 audit_core_dumps(signr);
36209
36210 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36211 + gr_handle_brute_attach(current, cprm.mm_flags);
36212 +
36213 binfmt = mm->binfmt;
36214 if (!binfmt || !binfmt->core_dump)
36215 goto fail;
36216 @@ -1981,6 +2287,8 @@ void do_coredump(long signr, int exit_co
36217 goto fail_corename;
36218 }
36219
36220 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36221 +
36222 if (ispipe) {
36223 int dump_count;
36224 char **helper_argv;
36225 @@ -2008,7 +2316,7 @@ void do_coredump(long signr, int exit_co
36226 }
36227 cprm.limit = RLIM_INFINITY;
36228
36229 - dump_count = atomic_inc_return(&core_dump_count);
36230 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
36231 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36232 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36233 task_tgid_vnr(current), current->comm);
36234 @@ -2078,7 +2386,7 @@ close_fail:
36235 filp_close(cprm.file, NULL);
36236 fail_dropcount:
36237 if (ispipe)
36238 - atomic_dec(&core_dump_count);
36239 + atomic_dec_unchecked(&core_dump_count);
36240 fail_unlock:
36241 kfree(cn.corename);
36242 fail_corename:
36243 diff -urNp linux-2.6.39.4/fs/ext2/balloc.c linux-2.6.39.4/fs/ext2/balloc.c
36244 --- linux-2.6.39.4/fs/ext2/balloc.c 2011-05-19 00:06:34.000000000 -0400
36245 +++ linux-2.6.39.4/fs/ext2/balloc.c 2011-08-05 19:44:37.000000000 -0400
36246 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36247
36248 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36249 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36250 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36251 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36252 sbi->s_resuid != current_fsuid() &&
36253 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36254 return 0;
36255 diff -urNp linux-2.6.39.4/fs/ext3/balloc.c linux-2.6.39.4/fs/ext3/balloc.c
36256 --- linux-2.6.39.4/fs/ext3/balloc.c 2011-05-19 00:06:34.000000000 -0400
36257 +++ linux-2.6.39.4/fs/ext3/balloc.c 2011-08-05 19:44:37.000000000 -0400
36258 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36259
36260 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36261 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36262 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36263 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36264 sbi->s_resuid != current_fsuid() &&
36265 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36266 return 0;
36267 diff -urNp linux-2.6.39.4/fs/ext4/balloc.c linux-2.6.39.4/fs/ext4/balloc.c
36268 --- linux-2.6.39.4/fs/ext4/balloc.c 2011-05-19 00:06:34.000000000 -0400
36269 +++ linux-2.6.39.4/fs/ext4/balloc.c 2011-08-05 19:44:37.000000000 -0400
36270 @@ -522,7 +522,7 @@ static int ext4_has_free_blocks(struct e
36271 /* Hm, nope. Are (enough) root reserved blocks available? */
36272 if (sbi->s_resuid == current_fsuid() ||
36273 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36274 - capable(CAP_SYS_RESOURCE)) {
36275 + capable_nolog(CAP_SYS_RESOURCE)) {
36276 if (free_blocks >= (nblocks + dirty_blocks))
36277 return 1;
36278 }
36279 diff -urNp linux-2.6.39.4/fs/ext4/ext4.h linux-2.6.39.4/fs/ext4/ext4.h
36280 --- linux-2.6.39.4/fs/ext4/ext4.h 2011-06-03 00:04:14.000000000 -0400
36281 +++ linux-2.6.39.4/fs/ext4/ext4.h 2011-08-05 19:44:37.000000000 -0400
36282 @@ -1166,19 +1166,19 @@ struct ext4_sb_info {
36283 unsigned long s_mb_last_start;
36284
36285 /* stats for buddy allocator */
36286 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36287 - atomic_t s_bal_success; /* we found long enough chunks */
36288 - atomic_t s_bal_allocated; /* in blocks */
36289 - atomic_t s_bal_ex_scanned; /* total extents scanned */
36290 - atomic_t s_bal_goals; /* goal hits */
36291 - atomic_t s_bal_breaks; /* too long searches */
36292 - atomic_t s_bal_2orders; /* 2^order hits */
36293 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36294 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36295 + atomic_unchecked_t s_bal_allocated; /* in blocks */
36296 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36297 + atomic_unchecked_t s_bal_goals; /* goal hits */
36298 + atomic_unchecked_t s_bal_breaks; /* too long searches */
36299 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36300 spinlock_t s_bal_lock;
36301 unsigned long s_mb_buddies_generated;
36302 unsigned long long s_mb_generation_time;
36303 - atomic_t s_mb_lost_chunks;
36304 - atomic_t s_mb_preallocated;
36305 - atomic_t s_mb_discarded;
36306 + atomic_unchecked_t s_mb_lost_chunks;
36307 + atomic_unchecked_t s_mb_preallocated;
36308 + atomic_unchecked_t s_mb_discarded;
36309 atomic_t s_lock_busy;
36310
36311 /* locality groups */
36312 diff -urNp linux-2.6.39.4/fs/ext4/mballoc.c linux-2.6.39.4/fs/ext4/mballoc.c
36313 --- linux-2.6.39.4/fs/ext4/mballoc.c 2011-06-03 00:04:14.000000000 -0400
36314 +++ linux-2.6.39.4/fs/ext4/mballoc.c 2011-08-05 19:44:37.000000000 -0400
36315 @@ -1853,7 +1853,7 @@ void ext4_mb_simple_scan_group(struct ex
36316 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36317
36318 if (EXT4_SB(sb)->s_mb_stats)
36319 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36320 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36321
36322 break;
36323 }
36324 @@ -2147,7 +2147,7 @@ repeat:
36325 ac->ac_status = AC_STATUS_CONTINUE;
36326 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36327 cr = 3;
36328 - atomic_inc(&sbi->s_mb_lost_chunks);
36329 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36330 goto repeat;
36331 }
36332 }
36333 @@ -2190,6 +2190,8 @@ static int ext4_mb_seq_groups_show(struc
36334 ext4_grpblk_t counters[16];
36335 } sg;
36336
36337 + pax_track_stack();
36338 +
36339 group--;
36340 if (group == 0)
36341 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36342 @@ -2613,25 +2615,25 @@ int ext4_mb_release(struct super_block *
36343 if (sbi->s_mb_stats) {
36344 printk(KERN_INFO
36345 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36346 - atomic_read(&sbi->s_bal_allocated),
36347 - atomic_read(&sbi->s_bal_reqs),
36348 - atomic_read(&sbi->s_bal_success));
36349 + atomic_read_unchecked(&sbi->s_bal_allocated),
36350 + atomic_read_unchecked(&sbi->s_bal_reqs),
36351 + atomic_read_unchecked(&sbi->s_bal_success));
36352 printk(KERN_INFO
36353 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36354 "%u 2^N hits, %u breaks, %u lost\n",
36355 - atomic_read(&sbi->s_bal_ex_scanned),
36356 - atomic_read(&sbi->s_bal_goals),
36357 - atomic_read(&sbi->s_bal_2orders),
36358 - atomic_read(&sbi->s_bal_breaks),
36359 - atomic_read(&sbi->s_mb_lost_chunks));
36360 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36361 + atomic_read_unchecked(&sbi->s_bal_goals),
36362 + atomic_read_unchecked(&sbi->s_bal_2orders),
36363 + atomic_read_unchecked(&sbi->s_bal_breaks),
36364 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36365 printk(KERN_INFO
36366 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36367 sbi->s_mb_buddies_generated++,
36368 sbi->s_mb_generation_time);
36369 printk(KERN_INFO
36370 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36371 - atomic_read(&sbi->s_mb_preallocated),
36372 - atomic_read(&sbi->s_mb_discarded));
36373 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36374 + atomic_read_unchecked(&sbi->s_mb_discarded));
36375 }
36376
36377 free_percpu(sbi->s_locality_groups);
36378 @@ -3107,16 +3109,16 @@ static void ext4_mb_collect_stats(struct
36379 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36380
36381 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36382 - atomic_inc(&sbi->s_bal_reqs);
36383 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36384 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36385 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36386 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36387 - atomic_inc(&sbi->s_bal_success);
36388 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36389 + atomic_inc_unchecked(&sbi->s_bal_success);
36390 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36391 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36392 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36393 - atomic_inc(&sbi->s_bal_goals);
36394 + atomic_inc_unchecked(&sbi->s_bal_goals);
36395 if (ac->ac_found > sbi->s_mb_max_to_scan)
36396 - atomic_inc(&sbi->s_bal_breaks);
36397 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36398 }
36399
36400 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36401 @@ -3514,7 +3516,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36402 trace_ext4_mb_new_inode_pa(ac, pa);
36403
36404 ext4_mb_use_inode_pa(ac, pa);
36405 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36406 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36407
36408 ei = EXT4_I(ac->ac_inode);
36409 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36410 @@ -3574,7 +3576,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36411 trace_ext4_mb_new_group_pa(ac, pa);
36412
36413 ext4_mb_use_group_pa(ac, pa);
36414 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36415 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36416
36417 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36418 lg = ac->ac_lg;
36419 @@ -3661,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36420 * from the bitmap and continue.
36421 */
36422 }
36423 - atomic_add(free, &sbi->s_mb_discarded);
36424 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36425
36426 return err;
36427 }
36428 @@ -3679,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36429 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36430 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36431 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36432 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36433 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36434 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36435
36436 return 0;
36437 diff -urNp linux-2.6.39.4/fs/fcntl.c linux-2.6.39.4/fs/fcntl.c
36438 --- linux-2.6.39.4/fs/fcntl.c 2011-05-19 00:06:34.000000000 -0400
36439 +++ linux-2.6.39.4/fs/fcntl.c 2011-08-05 19:44:37.000000000 -0400
36440 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36441 if (err)
36442 return err;
36443
36444 + if (gr_handle_chroot_fowner(pid, type))
36445 + return -ENOENT;
36446 + if (gr_check_protected_task_fowner(pid, type))
36447 + return -EACCES;
36448 +
36449 f_modown(filp, pid, type, force);
36450 return 0;
36451 }
36452 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36453 switch (cmd) {
36454 case F_DUPFD:
36455 case F_DUPFD_CLOEXEC:
36456 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36457 if (arg >= rlimit(RLIMIT_NOFILE))
36458 break;
36459 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36460 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36461 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36462 * is defined as O_NONBLOCK on some platforms and not on others.
36463 */
36464 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36465 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36466 O_RDONLY | O_WRONLY | O_RDWR |
36467 O_CREAT | O_EXCL | O_NOCTTY |
36468 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36469 __O_SYNC | O_DSYNC | FASYNC |
36470 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36471 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36472 - __FMODE_EXEC | O_PATH
36473 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36474 ));
36475
36476 fasync_cache = kmem_cache_create("fasync_cache",
36477 diff -urNp linux-2.6.39.4/fs/fifo.c linux-2.6.39.4/fs/fifo.c
36478 --- linux-2.6.39.4/fs/fifo.c 2011-05-19 00:06:34.000000000 -0400
36479 +++ linux-2.6.39.4/fs/fifo.c 2011-08-05 19:44:37.000000000 -0400
36480 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36481 */
36482 filp->f_op = &read_pipefifo_fops;
36483 pipe->r_counter++;
36484 - if (pipe->readers++ == 0)
36485 + if (atomic_inc_return(&pipe->readers) == 1)
36486 wake_up_partner(inode);
36487
36488 - if (!pipe->writers) {
36489 + if (!atomic_read(&pipe->writers)) {
36490 if ((filp->f_flags & O_NONBLOCK)) {
36491 /* suppress POLLHUP until we have
36492 * seen a writer */
36493 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36494 * errno=ENXIO when there is no process reading the FIFO.
36495 */
36496 ret = -ENXIO;
36497 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36498 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36499 goto err;
36500
36501 filp->f_op = &write_pipefifo_fops;
36502 pipe->w_counter++;
36503 - if (!pipe->writers++)
36504 + if (atomic_inc_return(&pipe->writers) == 1)
36505 wake_up_partner(inode);
36506
36507 - if (!pipe->readers) {
36508 + if (!atomic_read(&pipe->readers)) {
36509 wait_for_partner(inode, &pipe->r_counter);
36510 if (signal_pending(current))
36511 goto err_wr;
36512 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36513 */
36514 filp->f_op = &rdwr_pipefifo_fops;
36515
36516 - pipe->readers++;
36517 - pipe->writers++;
36518 + atomic_inc(&pipe->readers);
36519 + atomic_inc(&pipe->writers);
36520 pipe->r_counter++;
36521 pipe->w_counter++;
36522 - if (pipe->readers == 1 || pipe->writers == 1)
36523 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36524 wake_up_partner(inode);
36525 break;
36526
36527 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36528 return 0;
36529
36530 err_rd:
36531 - if (!--pipe->readers)
36532 + if (atomic_dec_and_test(&pipe->readers))
36533 wake_up_interruptible(&pipe->wait);
36534 ret = -ERESTARTSYS;
36535 goto err;
36536
36537 err_wr:
36538 - if (!--pipe->writers)
36539 + if (atomic_dec_and_test(&pipe->writers))
36540 wake_up_interruptible(&pipe->wait);
36541 ret = -ERESTARTSYS;
36542 goto err;
36543
36544 err:
36545 - if (!pipe->readers && !pipe->writers)
36546 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36547 free_pipe_info(inode);
36548
36549 err_nocleanup:
36550 diff -urNp linux-2.6.39.4/fs/file.c linux-2.6.39.4/fs/file.c
36551 --- linux-2.6.39.4/fs/file.c 2011-05-19 00:06:34.000000000 -0400
36552 +++ linux-2.6.39.4/fs/file.c 2011-08-05 19:44:37.000000000 -0400
36553 @@ -15,6 +15,7 @@
36554 #include <linux/slab.h>
36555 #include <linux/vmalloc.h>
36556 #include <linux/file.h>
36557 +#include <linux/security.h>
36558 #include <linux/fdtable.h>
36559 #include <linux/bitops.h>
36560 #include <linux/interrupt.h>
36561 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36562 * N.B. For clone tasks sharing a files structure, this test
36563 * will limit the total number of files that can be opened.
36564 */
36565 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36566 if (nr >= rlimit(RLIMIT_NOFILE))
36567 return -EMFILE;
36568
36569 diff -urNp linux-2.6.39.4/fs/filesystems.c linux-2.6.39.4/fs/filesystems.c
36570 --- linux-2.6.39.4/fs/filesystems.c 2011-05-19 00:06:34.000000000 -0400
36571 +++ linux-2.6.39.4/fs/filesystems.c 2011-08-05 19:44:37.000000000 -0400
36572 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36573 int len = dot ? dot - name : strlen(name);
36574
36575 fs = __get_fs_type(name, len);
36576 +
36577 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36578 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36579 +#else
36580 if (!fs && (request_module("%.*s", len, name) == 0))
36581 +#endif
36582 fs = __get_fs_type(name, len);
36583
36584 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36585 diff -urNp linux-2.6.39.4/fs/fscache/cookie.c linux-2.6.39.4/fs/fscache/cookie.c
36586 --- linux-2.6.39.4/fs/fscache/cookie.c 2011-05-19 00:06:34.000000000 -0400
36587 +++ linux-2.6.39.4/fs/fscache/cookie.c 2011-08-05 19:44:37.000000000 -0400
36588 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36589 parent ? (char *) parent->def->name : "<no-parent>",
36590 def->name, netfs_data);
36591
36592 - fscache_stat(&fscache_n_acquires);
36593 + fscache_stat_unchecked(&fscache_n_acquires);
36594
36595 /* if there's no parent cookie, then we don't create one here either */
36596 if (!parent) {
36597 - fscache_stat(&fscache_n_acquires_null);
36598 + fscache_stat_unchecked(&fscache_n_acquires_null);
36599 _leave(" [no parent]");
36600 return NULL;
36601 }
36602 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36603 /* allocate and initialise a cookie */
36604 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36605 if (!cookie) {
36606 - fscache_stat(&fscache_n_acquires_oom);
36607 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36608 _leave(" [ENOMEM]");
36609 return NULL;
36610 }
36611 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36612
36613 switch (cookie->def->type) {
36614 case FSCACHE_COOKIE_TYPE_INDEX:
36615 - fscache_stat(&fscache_n_cookie_index);
36616 + fscache_stat_unchecked(&fscache_n_cookie_index);
36617 break;
36618 case FSCACHE_COOKIE_TYPE_DATAFILE:
36619 - fscache_stat(&fscache_n_cookie_data);
36620 + fscache_stat_unchecked(&fscache_n_cookie_data);
36621 break;
36622 default:
36623 - fscache_stat(&fscache_n_cookie_special);
36624 + fscache_stat_unchecked(&fscache_n_cookie_special);
36625 break;
36626 }
36627
36628 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36629 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36630 atomic_dec(&parent->n_children);
36631 __fscache_cookie_put(cookie);
36632 - fscache_stat(&fscache_n_acquires_nobufs);
36633 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36634 _leave(" = NULL");
36635 return NULL;
36636 }
36637 }
36638
36639 - fscache_stat(&fscache_n_acquires_ok);
36640 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36641 _leave(" = %p", cookie);
36642 return cookie;
36643 }
36644 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36645 cache = fscache_select_cache_for_object(cookie->parent);
36646 if (!cache) {
36647 up_read(&fscache_addremove_sem);
36648 - fscache_stat(&fscache_n_acquires_no_cache);
36649 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36650 _leave(" = -ENOMEDIUM [no cache]");
36651 return -ENOMEDIUM;
36652 }
36653 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36654 object = cache->ops->alloc_object(cache, cookie);
36655 fscache_stat_d(&fscache_n_cop_alloc_object);
36656 if (IS_ERR(object)) {
36657 - fscache_stat(&fscache_n_object_no_alloc);
36658 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36659 ret = PTR_ERR(object);
36660 goto error;
36661 }
36662
36663 - fscache_stat(&fscache_n_object_alloc);
36664 + fscache_stat_unchecked(&fscache_n_object_alloc);
36665
36666 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36667
36668 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36669 struct fscache_object *object;
36670 struct hlist_node *_p;
36671
36672 - fscache_stat(&fscache_n_updates);
36673 + fscache_stat_unchecked(&fscache_n_updates);
36674
36675 if (!cookie) {
36676 - fscache_stat(&fscache_n_updates_null);
36677 + fscache_stat_unchecked(&fscache_n_updates_null);
36678 _leave(" [no cookie]");
36679 return;
36680 }
36681 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36682 struct fscache_object *object;
36683 unsigned long event;
36684
36685 - fscache_stat(&fscache_n_relinquishes);
36686 + fscache_stat_unchecked(&fscache_n_relinquishes);
36687 if (retire)
36688 - fscache_stat(&fscache_n_relinquishes_retire);
36689 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36690
36691 if (!cookie) {
36692 - fscache_stat(&fscache_n_relinquishes_null);
36693 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36694 _leave(" [no cookie]");
36695 return;
36696 }
36697 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36698
36699 /* wait for the cookie to finish being instantiated (or to fail) */
36700 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36701 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36702 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36703 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36704 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36705 }
36706 diff -urNp linux-2.6.39.4/fs/fscache/internal.h linux-2.6.39.4/fs/fscache/internal.h
36707 --- linux-2.6.39.4/fs/fscache/internal.h 2011-05-19 00:06:34.000000000 -0400
36708 +++ linux-2.6.39.4/fs/fscache/internal.h 2011-08-05 19:44:37.000000000 -0400
36709 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36710 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36711 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36712
36713 -extern atomic_t fscache_n_op_pend;
36714 -extern atomic_t fscache_n_op_run;
36715 -extern atomic_t fscache_n_op_enqueue;
36716 -extern atomic_t fscache_n_op_deferred_release;
36717 -extern atomic_t fscache_n_op_release;
36718 -extern atomic_t fscache_n_op_gc;
36719 -extern atomic_t fscache_n_op_cancelled;
36720 -extern atomic_t fscache_n_op_rejected;
36721 -
36722 -extern atomic_t fscache_n_attr_changed;
36723 -extern atomic_t fscache_n_attr_changed_ok;
36724 -extern atomic_t fscache_n_attr_changed_nobufs;
36725 -extern atomic_t fscache_n_attr_changed_nomem;
36726 -extern atomic_t fscache_n_attr_changed_calls;
36727 -
36728 -extern atomic_t fscache_n_allocs;
36729 -extern atomic_t fscache_n_allocs_ok;
36730 -extern atomic_t fscache_n_allocs_wait;
36731 -extern atomic_t fscache_n_allocs_nobufs;
36732 -extern atomic_t fscache_n_allocs_intr;
36733 -extern atomic_t fscache_n_allocs_object_dead;
36734 -extern atomic_t fscache_n_alloc_ops;
36735 -extern atomic_t fscache_n_alloc_op_waits;
36736 -
36737 -extern atomic_t fscache_n_retrievals;
36738 -extern atomic_t fscache_n_retrievals_ok;
36739 -extern atomic_t fscache_n_retrievals_wait;
36740 -extern atomic_t fscache_n_retrievals_nodata;
36741 -extern atomic_t fscache_n_retrievals_nobufs;
36742 -extern atomic_t fscache_n_retrievals_intr;
36743 -extern atomic_t fscache_n_retrievals_nomem;
36744 -extern atomic_t fscache_n_retrievals_object_dead;
36745 -extern atomic_t fscache_n_retrieval_ops;
36746 -extern atomic_t fscache_n_retrieval_op_waits;
36747 -
36748 -extern atomic_t fscache_n_stores;
36749 -extern atomic_t fscache_n_stores_ok;
36750 -extern atomic_t fscache_n_stores_again;
36751 -extern atomic_t fscache_n_stores_nobufs;
36752 -extern atomic_t fscache_n_stores_oom;
36753 -extern atomic_t fscache_n_store_ops;
36754 -extern atomic_t fscache_n_store_calls;
36755 -extern atomic_t fscache_n_store_pages;
36756 -extern atomic_t fscache_n_store_radix_deletes;
36757 -extern atomic_t fscache_n_store_pages_over_limit;
36758 -
36759 -extern atomic_t fscache_n_store_vmscan_not_storing;
36760 -extern atomic_t fscache_n_store_vmscan_gone;
36761 -extern atomic_t fscache_n_store_vmscan_busy;
36762 -extern atomic_t fscache_n_store_vmscan_cancelled;
36763 -
36764 -extern atomic_t fscache_n_marks;
36765 -extern atomic_t fscache_n_uncaches;
36766 -
36767 -extern atomic_t fscache_n_acquires;
36768 -extern atomic_t fscache_n_acquires_null;
36769 -extern atomic_t fscache_n_acquires_no_cache;
36770 -extern atomic_t fscache_n_acquires_ok;
36771 -extern atomic_t fscache_n_acquires_nobufs;
36772 -extern atomic_t fscache_n_acquires_oom;
36773 -
36774 -extern atomic_t fscache_n_updates;
36775 -extern atomic_t fscache_n_updates_null;
36776 -extern atomic_t fscache_n_updates_run;
36777 -
36778 -extern atomic_t fscache_n_relinquishes;
36779 -extern atomic_t fscache_n_relinquishes_null;
36780 -extern atomic_t fscache_n_relinquishes_waitcrt;
36781 -extern atomic_t fscache_n_relinquishes_retire;
36782 -
36783 -extern atomic_t fscache_n_cookie_index;
36784 -extern atomic_t fscache_n_cookie_data;
36785 -extern atomic_t fscache_n_cookie_special;
36786 -
36787 -extern atomic_t fscache_n_object_alloc;
36788 -extern atomic_t fscache_n_object_no_alloc;
36789 -extern atomic_t fscache_n_object_lookups;
36790 -extern atomic_t fscache_n_object_lookups_negative;
36791 -extern atomic_t fscache_n_object_lookups_positive;
36792 -extern atomic_t fscache_n_object_lookups_timed_out;
36793 -extern atomic_t fscache_n_object_created;
36794 -extern atomic_t fscache_n_object_avail;
36795 -extern atomic_t fscache_n_object_dead;
36796 -
36797 -extern atomic_t fscache_n_checkaux_none;
36798 -extern atomic_t fscache_n_checkaux_okay;
36799 -extern atomic_t fscache_n_checkaux_update;
36800 -extern atomic_t fscache_n_checkaux_obsolete;
36801 +extern atomic_unchecked_t fscache_n_op_pend;
36802 +extern atomic_unchecked_t fscache_n_op_run;
36803 +extern atomic_unchecked_t fscache_n_op_enqueue;
36804 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36805 +extern atomic_unchecked_t fscache_n_op_release;
36806 +extern atomic_unchecked_t fscache_n_op_gc;
36807 +extern atomic_unchecked_t fscache_n_op_cancelled;
36808 +extern atomic_unchecked_t fscache_n_op_rejected;
36809 +
36810 +extern atomic_unchecked_t fscache_n_attr_changed;
36811 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36812 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36813 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36814 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36815 +
36816 +extern atomic_unchecked_t fscache_n_allocs;
36817 +extern atomic_unchecked_t fscache_n_allocs_ok;
36818 +extern atomic_unchecked_t fscache_n_allocs_wait;
36819 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36820 +extern atomic_unchecked_t fscache_n_allocs_intr;
36821 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36822 +extern atomic_unchecked_t fscache_n_alloc_ops;
36823 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36824 +
36825 +extern atomic_unchecked_t fscache_n_retrievals;
36826 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36827 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36828 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36829 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36830 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36831 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36832 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36833 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36834 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36835 +
36836 +extern atomic_unchecked_t fscache_n_stores;
36837 +extern atomic_unchecked_t fscache_n_stores_ok;
36838 +extern atomic_unchecked_t fscache_n_stores_again;
36839 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36840 +extern atomic_unchecked_t fscache_n_stores_oom;
36841 +extern atomic_unchecked_t fscache_n_store_ops;
36842 +extern atomic_unchecked_t fscache_n_store_calls;
36843 +extern atomic_unchecked_t fscache_n_store_pages;
36844 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36845 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36846 +
36847 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36848 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36849 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36850 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36851 +
36852 +extern atomic_unchecked_t fscache_n_marks;
36853 +extern atomic_unchecked_t fscache_n_uncaches;
36854 +
36855 +extern atomic_unchecked_t fscache_n_acquires;
36856 +extern atomic_unchecked_t fscache_n_acquires_null;
36857 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36858 +extern atomic_unchecked_t fscache_n_acquires_ok;
36859 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36860 +extern atomic_unchecked_t fscache_n_acquires_oom;
36861 +
36862 +extern atomic_unchecked_t fscache_n_updates;
36863 +extern atomic_unchecked_t fscache_n_updates_null;
36864 +extern atomic_unchecked_t fscache_n_updates_run;
36865 +
36866 +extern atomic_unchecked_t fscache_n_relinquishes;
36867 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36868 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36869 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36870 +
36871 +extern atomic_unchecked_t fscache_n_cookie_index;
36872 +extern atomic_unchecked_t fscache_n_cookie_data;
36873 +extern atomic_unchecked_t fscache_n_cookie_special;
36874 +
36875 +extern atomic_unchecked_t fscache_n_object_alloc;
36876 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36877 +extern atomic_unchecked_t fscache_n_object_lookups;
36878 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36879 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36880 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36881 +extern atomic_unchecked_t fscache_n_object_created;
36882 +extern atomic_unchecked_t fscache_n_object_avail;
36883 +extern atomic_unchecked_t fscache_n_object_dead;
36884 +
36885 +extern atomic_unchecked_t fscache_n_checkaux_none;
36886 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36887 +extern atomic_unchecked_t fscache_n_checkaux_update;
36888 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36889
36890 extern atomic_t fscache_n_cop_alloc_object;
36891 extern atomic_t fscache_n_cop_lookup_object;
36892 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36893 atomic_inc(stat);
36894 }
36895
36896 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36897 +{
36898 + atomic_inc_unchecked(stat);
36899 +}
36900 +
36901 static inline void fscache_stat_d(atomic_t *stat)
36902 {
36903 atomic_dec(stat);
36904 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36905
36906 #define __fscache_stat(stat) (NULL)
36907 #define fscache_stat(stat) do {} while (0)
36908 +#define fscache_stat_unchecked(stat) do {} while (0)
36909 #define fscache_stat_d(stat) do {} while (0)
36910 #endif
36911
36912 diff -urNp linux-2.6.39.4/fs/fscache/object.c linux-2.6.39.4/fs/fscache/object.c
36913 --- linux-2.6.39.4/fs/fscache/object.c 2011-05-19 00:06:34.000000000 -0400
36914 +++ linux-2.6.39.4/fs/fscache/object.c 2011-08-05 19:44:37.000000000 -0400
36915 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36916 /* update the object metadata on disk */
36917 case FSCACHE_OBJECT_UPDATING:
36918 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36919 - fscache_stat(&fscache_n_updates_run);
36920 + fscache_stat_unchecked(&fscache_n_updates_run);
36921 fscache_stat(&fscache_n_cop_update_object);
36922 object->cache->ops->update_object(object);
36923 fscache_stat_d(&fscache_n_cop_update_object);
36924 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
36925 spin_lock(&object->lock);
36926 object->state = FSCACHE_OBJECT_DEAD;
36927 spin_unlock(&object->lock);
36928 - fscache_stat(&fscache_n_object_dead);
36929 + fscache_stat_unchecked(&fscache_n_object_dead);
36930 goto terminal_transit;
36931
36932 /* handle the parent cache of this object being withdrawn from
36933 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
36934 spin_lock(&object->lock);
36935 object->state = FSCACHE_OBJECT_DEAD;
36936 spin_unlock(&object->lock);
36937 - fscache_stat(&fscache_n_object_dead);
36938 + fscache_stat_unchecked(&fscache_n_object_dead);
36939 goto terminal_transit;
36940
36941 /* complain about the object being woken up once it is
36942 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
36943 parent->cookie->def->name, cookie->def->name,
36944 object->cache->tag->name);
36945
36946 - fscache_stat(&fscache_n_object_lookups);
36947 + fscache_stat_unchecked(&fscache_n_object_lookups);
36948 fscache_stat(&fscache_n_cop_lookup_object);
36949 ret = object->cache->ops->lookup_object(object);
36950 fscache_stat_d(&fscache_n_cop_lookup_object);
36951 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
36952 if (ret == -ETIMEDOUT) {
36953 /* probably stuck behind another object, so move this one to
36954 * the back of the queue */
36955 - fscache_stat(&fscache_n_object_lookups_timed_out);
36956 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
36957 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36958 }
36959
36960 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
36961
36962 spin_lock(&object->lock);
36963 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36964 - fscache_stat(&fscache_n_object_lookups_negative);
36965 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
36966
36967 /* transit here to allow write requests to begin stacking up
36968 * and read requests to begin returning ENODATA */
36969 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
36970 * result, in which case there may be data available */
36971 spin_lock(&object->lock);
36972 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
36973 - fscache_stat(&fscache_n_object_lookups_positive);
36974 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
36975
36976 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
36977
36978 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
36979 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
36980 } else {
36981 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
36982 - fscache_stat(&fscache_n_object_created);
36983 + fscache_stat_unchecked(&fscache_n_object_created);
36984
36985 object->state = FSCACHE_OBJECT_AVAILABLE;
36986 spin_unlock(&object->lock);
36987 @@ -602,7 +602,7 @@ static void fscache_object_available(str
36988 fscache_enqueue_dependents(object);
36989
36990 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
36991 - fscache_stat(&fscache_n_object_avail);
36992 + fscache_stat_unchecked(&fscache_n_object_avail);
36993
36994 _leave("");
36995 }
36996 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
36997 enum fscache_checkaux result;
36998
36999 if (!object->cookie->def->check_aux) {
37000 - fscache_stat(&fscache_n_checkaux_none);
37001 + fscache_stat_unchecked(&fscache_n_checkaux_none);
37002 return FSCACHE_CHECKAUX_OKAY;
37003 }
37004
37005 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
37006 switch (result) {
37007 /* entry okay as is */
37008 case FSCACHE_CHECKAUX_OKAY:
37009 - fscache_stat(&fscache_n_checkaux_okay);
37010 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
37011 break;
37012
37013 /* entry requires update */
37014 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
37015 - fscache_stat(&fscache_n_checkaux_update);
37016 + fscache_stat_unchecked(&fscache_n_checkaux_update);
37017 break;
37018
37019 /* entry requires deletion */
37020 case FSCACHE_CHECKAUX_OBSOLETE:
37021 - fscache_stat(&fscache_n_checkaux_obsolete);
37022 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
37023 break;
37024
37025 default:
37026 diff -urNp linux-2.6.39.4/fs/fscache/operation.c linux-2.6.39.4/fs/fscache/operation.c
37027 --- linux-2.6.39.4/fs/fscache/operation.c 2011-05-19 00:06:34.000000000 -0400
37028 +++ linux-2.6.39.4/fs/fscache/operation.c 2011-08-05 19:44:37.000000000 -0400
37029 @@ -17,7 +17,7 @@
37030 #include <linux/slab.h>
37031 #include "internal.h"
37032
37033 -atomic_t fscache_op_debug_id;
37034 +atomic_unchecked_t fscache_op_debug_id;
37035 EXPORT_SYMBOL(fscache_op_debug_id);
37036
37037 /**
37038 @@ -40,7 +40,7 @@ void fscache_enqueue_operation(struct fs
37039 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
37040 ASSERTCMP(atomic_read(&op->usage), >, 0);
37041
37042 - fscache_stat(&fscache_n_op_enqueue);
37043 + fscache_stat_unchecked(&fscache_n_op_enqueue);
37044 switch (op->flags & FSCACHE_OP_TYPE) {
37045 case FSCACHE_OP_ASYNC:
37046 _debug("queue async");
37047 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
37048 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37049 if (op->processor)
37050 fscache_enqueue_operation(op);
37051 - fscache_stat(&fscache_n_op_run);
37052 + fscache_stat_unchecked(&fscache_n_op_run);
37053 }
37054
37055 /*
37056 @@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct f
37057 if (object->n_ops > 1) {
37058 atomic_inc(&op->usage);
37059 list_add_tail(&op->pend_link, &object->pending_ops);
37060 - fscache_stat(&fscache_n_op_pend);
37061 + fscache_stat_unchecked(&fscache_n_op_pend);
37062 } else if (!list_empty(&object->pending_ops)) {
37063 atomic_inc(&op->usage);
37064 list_add_tail(&op->pend_link, &object->pending_ops);
37065 - fscache_stat(&fscache_n_op_pend);
37066 + fscache_stat_unchecked(&fscache_n_op_pend);
37067 fscache_start_operations(object);
37068 } else {
37069 ASSERTCMP(object->n_in_progress, ==, 0);
37070 @@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct f
37071 object->n_exclusive++; /* reads and writes must wait */
37072 atomic_inc(&op->usage);
37073 list_add_tail(&op->pend_link, &object->pending_ops);
37074 - fscache_stat(&fscache_n_op_pend);
37075 + fscache_stat_unchecked(&fscache_n_op_pend);
37076 ret = 0;
37077 } else {
37078 /* not allowed to submit ops in any other state */
37079 @@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_obj
37080 if (object->n_exclusive > 0) {
37081 atomic_inc(&op->usage);
37082 list_add_tail(&op->pend_link, &object->pending_ops);
37083 - fscache_stat(&fscache_n_op_pend);
37084 + fscache_stat_unchecked(&fscache_n_op_pend);
37085 } else if (!list_empty(&object->pending_ops)) {
37086 atomic_inc(&op->usage);
37087 list_add_tail(&op->pend_link, &object->pending_ops);
37088 - fscache_stat(&fscache_n_op_pend);
37089 + fscache_stat_unchecked(&fscache_n_op_pend);
37090 fscache_start_operations(object);
37091 } else {
37092 ASSERTCMP(object->n_exclusive, ==, 0);
37093 @@ -227,12 +227,12 @@ int fscache_submit_op(struct fscache_obj
37094 object->n_ops++;
37095 atomic_inc(&op->usage);
37096 list_add_tail(&op->pend_link, &object->pending_ops);
37097 - fscache_stat(&fscache_n_op_pend);
37098 + fscache_stat_unchecked(&fscache_n_op_pend);
37099 ret = 0;
37100 } else if (object->state == FSCACHE_OBJECT_DYING ||
37101 object->state == FSCACHE_OBJECT_LC_DYING ||
37102 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37103 - fscache_stat(&fscache_n_op_rejected);
37104 + fscache_stat_unchecked(&fscache_n_op_rejected);
37105 ret = -ENOBUFS;
37106 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37107 fscache_report_unexpected_submission(object, op, ostate);
37108 @@ -302,7 +302,7 @@ int fscache_cancel_op(struct fscache_ope
37109
37110 ret = -EBUSY;
37111 if (!list_empty(&op->pend_link)) {
37112 - fscache_stat(&fscache_n_op_cancelled);
37113 + fscache_stat_unchecked(&fscache_n_op_cancelled);
37114 list_del_init(&op->pend_link);
37115 object->n_ops--;
37116 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37117 @@ -341,7 +341,7 @@ void fscache_put_operation(struct fscach
37118 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37119 BUG();
37120
37121 - fscache_stat(&fscache_n_op_release);
37122 + fscache_stat_unchecked(&fscache_n_op_release);
37123
37124 if (op->release) {
37125 op->release(op);
37126 @@ -358,7 +358,7 @@ void fscache_put_operation(struct fscach
37127 * lock, and defer it otherwise */
37128 if (!spin_trylock(&object->lock)) {
37129 _debug("defer put");
37130 - fscache_stat(&fscache_n_op_deferred_release);
37131 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
37132
37133 cache = object->cache;
37134 spin_lock(&cache->op_gc_list_lock);
37135 @@ -420,7 +420,7 @@ void fscache_operation_gc(struct work_st
37136
37137 _debug("GC DEFERRED REL OBJ%x OP%x",
37138 object->debug_id, op->debug_id);
37139 - fscache_stat(&fscache_n_op_gc);
37140 + fscache_stat_unchecked(&fscache_n_op_gc);
37141
37142 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37143
37144 diff -urNp linux-2.6.39.4/fs/fscache/page.c linux-2.6.39.4/fs/fscache/page.c
37145 --- linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:11:51.000000000 -0400
37146 +++ linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:12:20.000000000 -0400
37147 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37148 val = radix_tree_lookup(&cookie->stores, page->index);
37149 if (!val) {
37150 rcu_read_unlock();
37151 - fscache_stat(&fscache_n_store_vmscan_not_storing);
37152 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37153 __fscache_uncache_page(cookie, page);
37154 return true;
37155 }
37156 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37157 spin_unlock(&cookie->stores_lock);
37158
37159 if (xpage) {
37160 - fscache_stat(&fscache_n_store_vmscan_cancelled);
37161 - fscache_stat(&fscache_n_store_radix_deletes);
37162 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37163 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37164 ASSERTCMP(xpage, ==, page);
37165 } else {
37166 - fscache_stat(&fscache_n_store_vmscan_gone);
37167 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37168 }
37169
37170 wake_up_bit(&cookie->flags, 0);
37171 @@ -107,7 +107,7 @@ page_busy:
37172 /* we might want to wait here, but that could deadlock the allocator as
37173 * the work threads writing to the cache may all end up sleeping
37174 * on memory allocation */
37175 - fscache_stat(&fscache_n_store_vmscan_busy);
37176 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37177 return false;
37178 }
37179 EXPORT_SYMBOL(__fscache_maybe_release_page);
37180 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37181 FSCACHE_COOKIE_STORING_TAG);
37182 if (!radix_tree_tag_get(&cookie->stores, page->index,
37183 FSCACHE_COOKIE_PENDING_TAG)) {
37184 - fscache_stat(&fscache_n_store_radix_deletes);
37185 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37186 xpage = radix_tree_delete(&cookie->stores, page->index);
37187 }
37188 spin_unlock(&cookie->stores_lock);
37189 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37190
37191 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37192
37193 - fscache_stat(&fscache_n_attr_changed_calls);
37194 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37195
37196 if (fscache_object_is_active(object)) {
37197 fscache_set_op_state(op, "CallFS");
37198 @@ -179,11 +179,11 @@ int __fscache_attr_changed(struct fscach
37199
37200 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37201
37202 - fscache_stat(&fscache_n_attr_changed);
37203 + fscache_stat_unchecked(&fscache_n_attr_changed);
37204
37205 op = kzalloc(sizeof(*op), GFP_KERNEL);
37206 if (!op) {
37207 - fscache_stat(&fscache_n_attr_changed_nomem);
37208 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37209 _leave(" = -ENOMEM");
37210 return -ENOMEM;
37211 }
37212 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
37213 if (fscache_submit_exclusive_op(object, op) < 0)
37214 goto nobufs;
37215 spin_unlock(&cookie->lock);
37216 - fscache_stat(&fscache_n_attr_changed_ok);
37217 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37218 fscache_put_operation(op);
37219 _leave(" = 0");
37220 return 0;
37221 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
37222 nobufs:
37223 spin_unlock(&cookie->lock);
37224 kfree(op);
37225 - fscache_stat(&fscache_n_attr_changed_nobufs);
37226 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37227 _leave(" = %d", -ENOBUFS);
37228 return -ENOBUFS;
37229 }
37230 @@ -246,7 +246,7 @@ static struct fscache_retrieval *fscache
37231 /* allocate a retrieval operation and attempt to submit it */
37232 op = kzalloc(sizeof(*op), GFP_NOIO);
37233 if (!op) {
37234 - fscache_stat(&fscache_n_retrievals_nomem);
37235 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37236 return NULL;
37237 }
37238
37239 @@ -275,13 +275,13 @@ static int fscache_wait_for_deferred_loo
37240 return 0;
37241 }
37242
37243 - fscache_stat(&fscache_n_retrievals_wait);
37244 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
37245
37246 jif = jiffies;
37247 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37248 fscache_wait_bit_interruptible,
37249 TASK_INTERRUPTIBLE) != 0) {
37250 - fscache_stat(&fscache_n_retrievals_intr);
37251 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37252 _leave(" = -ERESTARTSYS");
37253 return -ERESTARTSYS;
37254 }
37255 @@ -299,8 +299,8 @@ static int fscache_wait_for_deferred_loo
37256 */
37257 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37258 struct fscache_retrieval *op,
37259 - atomic_t *stat_op_waits,
37260 - atomic_t *stat_object_dead)
37261 + atomic_unchecked_t *stat_op_waits,
37262 + atomic_unchecked_t *stat_object_dead)
37263 {
37264 int ret;
37265
37266 @@ -308,7 +308,7 @@ static int fscache_wait_for_retrieval_ac
37267 goto check_if_dead;
37268
37269 _debug(">>> WT");
37270 - fscache_stat(stat_op_waits);
37271 + fscache_stat_unchecked(stat_op_waits);
37272 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37273 fscache_wait_bit_interruptible,
37274 TASK_INTERRUPTIBLE) < 0) {
37275 @@ -325,7 +325,7 @@ static int fscache_wait_for_retrieval_ac
37276
37277 check_if_dead:
37278 if (unlikely(fscache_object_is_dead(object))) {
37279 - fscache_stat(stat_object_dead);
37280 + fscache_stat_unchecked(stat_object_dead);
37281 return -ENOBUFS;
37282 }
37283 return 0;
37284 @@ -352,7 +352,7 @@ int __fscache_read_or_alloc_page(struct
37285
37286 _enter("%p,%p,,,", cookie, page);
37287
37288 - fscache_stat(&fscache_n_retrievals);
37289 + fscache_stat_unchecked(&fscache_n_retrievals);
37290
37291 if (hlist_empty(&cookie->backing_objects))
37292 goto nobufs;
37293 @@ -386,7 +386,7 @@ int __fscache_read_or_alloc_page(struct
37294 goto nobufs_unlock;
37295 spin_unlock(&cookie->lock);
37296
37297 - fscache_stat(&fscache_n_retrieval_ops);
37298 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37299
37300 /* pin the netfs read context in case we need to do the actual netfs
37301 * read because we've encountered a cache read failure */
37302 @@ -416,15 +416,15 @@ int __fscache_read_or_alloc_page(struct
37303
37304 error:
37305 if (ret == -ENOMEM)
37306 - fscache_stat(&fscache_n_retrievals_nomem);
37307 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37308 else if (ret == -ERESTARTSYS)
37309 - fscache_stat(&fscache_n_retrievals_intr);
37310 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37311 else if (ret == -ENODATA)
37312 - fscache_stat(&fscache_n_retrievals_nodata);
37313 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37314 else if (ret < 0)
37315 - fscache_stat(&fscache_n_retrievals_nobufs);
37316 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37317 else
37318 - fscache_stat(&fscache_n_retrievals_ok);
37319 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37320
37321 fscache_put_retrieval(op);
37322 _leave(" = %d", ret);
37323 @@ -434,7 +434,7 @@ nobufs_unlock:
37324 spin_unlock(&cookie->lock);
37325 kfree(op);
37326 nobufs:
37327 - fscache_stat(&fscache_n_retrievals_nobufs);
37328 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37329 _leave(" = -ENOBUFS");
37330 return -ENOBUFS;
37331 }
37332 @@ -472,7 +472,7 @@ int __fscache_read_or_alloc_pages(struct
37333
37334 _enter("%p,,%d,,,", cookie, *nr_pages);
37335
37336 - fscache_stat(&fscache_n_retrievals);
37337 + fscache_stat_unchecked(&fscache_n_retrievals);
37338
37339 if (hlist_empty(&cookie->backing_objects))
37340 goto nobufs;
37341 @@ -503,7 +503,7 @@ int __fscache_read_or_alloc_pages(struct
37342 goto nobufs_unlock;
37343 spin_unlock(&cookie->lock);
37344
37345 - fscache_stat(&fscache_n_retrieval_ops);
37346 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37347
37348 /* pin the netfs read context in case we need to do the actual netfs
37349 * read because we've encountered a cache read failure */
37350 @@ -533,15 +533,15 @@ int __fscache_read_or_alloc_pages(struct
37351
37352 error:
37353 if (ret == -ENOMEM)
37354 - fscache_stat(&fscache_n_retrievals_nomem);
37355 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37356 else if (ret == -ERESTARTSYS)
37357 - fscache_stat(&fscache_n_retrievals_intr);
37358 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37359 else if (ret == -ENODATA)
37360 - fscache_stat(&fscache_n_retrievals_nodata);
37361 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37362 else if (ret < 0)
37363 - fscache_stat(&fscache_n_retrievals_nobufs);
37364 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37365 else
37366 - fscache_stat(&fscache_n_retrievals_ok);
37367 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37368
37369 fscache_put_retrieval(op);
37370 _leave(" = %d", ret);
37371 @@ -551,7 +551,7 @@ nobufs_unlock:
37372 spin_unlock(&cookie->lock);
37373 kfree(op);
37374 nobufs:
37375 - fscache_stat(&fscache_n_retrievals_nobufs);
37376 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37377 _leave(" = -ENOBUFS");
37378 return -ENOBUFS;
37379 }
37380 @@ -575,7 +575,7 @@ int __fscache_alloc_page(struct fscache_
37381
37382 _enter("%p,%p,,,", cookie, page);
37383
37384 - fscache_stat(&fscache_n_allocs);
37385 + fscache_stat_unchecked(&fscache_n_allocs);
37386
37387 if (hlist_empty(&cookie->backing_objects))
37388 goto nobufs;
37389 @@ -602,7 +602,7 @@ int __fscache_alloc_page(struct fscache_
37390 goto nobufs_unlock;
37391 spin_unlock(&cookie->lock);
37392
37393 - fscache_stat(&fscache_n_alloc_ops);
37394 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37395
37396 ret = fscache_wait_for_retrieval_activation(
37397 object, op,
37398 @@ -618,11 +618,11 @@ int __fscache_alloc_page(struct fscache_
37399
37400 error:
37401 if (ret == -ERESTARTSYS)
37402 - fscache_stat(&fscache_n_allocs_intr);
37403 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37404 else if (ret < 0)
37405 - fscache_stat(&fscache_n_allocs_nobufs);
37406 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37407 else
37408 - fscache_stat(&fscache_n_allocs_ok);
37409 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37410
37411 fscache_put_retrieval(op);
37412 _leave(" = %d", ret);
37413 @@ -632,7 +632,7 @@ nobufs_unlock:
37414 spin_unlock(&cookie->lock);
37415 kfree(op);
37416 nobufs:
37417 - fscache_stat(&fscache_n_allocs_nobufs);
37418 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37419 _leave(" = -ENOBUFS");
37420 return -ENOBUFS;
37421 }
37422 @@ -675,7 +675,7 @@ static void fscache_write_op(struct fsca
37423
37424 spin_lock(&cookie->stores_lock);
37425
37426 - fscache_stat(&fscache_n_store_calls);
37427 + fscache_stat_unchecked(&fscache_n_store_calls);
37428
37429 /* find a page to store */
37430 page = NULL;
37431 @@ -686,7 +686,7 @@ static void fscache_write_op(struct fsca
37432 page = results[0];
37433 _debug("gang %d [%lx]", n, page->index);
37434 if (page->index > op->store_limit) {
37435 - fscache_stat(&fscache_n_store_pages_over_limit);
37436 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37437 goto superseded;
37438 }
37439
37440 @@ -699,7 +699,7 @@ static void fscache_write_op(struct fsca
37441 spin_unlock(&object->lock);
37442
37443 fscache_set_op_state(&op->op, "Store");
37444 - fscache_stat(&fscache_n_store_pages);
37445 + fscache_stat_unchecked(&fscache_n_store_pages);
37446 fscache_stat(&fscache_n_cop_write_page);
37447 ret = object->cache->ops->write_page(op, page);
37448 fscache_stat_d(&fscache_n_cop_write_page);
37449 @@ -769,7 +769,7 @@ int __fscache_write_page(struct fscache_
37450 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37451 ASSERT(PageFsCache(page));
37452
37453 - fscache_stat(&fscache_n_stores);
37454 + fscache_stat_unchecked(&fscache_n_stores);
37455
37456 op = kzalloc(sizeof(*op), GFP_NOIO);
37457 if (!op)
37458 @@ -821,7 +821,7 @@ int __fscache_write_page(struct fscache_
37459 spin_unlock(&cookie->stores_lock);
37460 spin_unlock(&object->lock);
37461
37462 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37463 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37464 op->store_limit = object->store_limit;
37465
37466 if (fscache_submit_op(object, &op->op) < 0)
37467 @@ -829,8 +829,8 @@ int __fscache_write_page(struct fscache_
37468
37469 spin_unlock(&cookie->lock);
37470 radix_tree_preload_end();
37471 - fscache_stat(&fscache_n_store_ops);
37472 - fscache_stat(&fscache_n_stores_ok);
37473 + fscache_stat_unchecked(&fscache_n_store_ops);
37474 + fscache_stat_unchecked(&fscache_n_stores_ok);
37475
37476 /* the work queue now carries its own ref on the object */
37477 fscache_put_operation(&op->op);
37478 @@ -838,14 +838,14 @@ int __fscache_write_page(struct fscache_
37479 return 0;
37480
37481 already_queued:
37482 - fscache_stat(&fscache_n_stores_again);
37483 + fscache_stat_unchecked(&fscache_n_stores_again);
37484 already_pending:
37485 spin_unlock(&cookie->stores_lock);
37486 spin_unlock(&object->lock);
37487 spin_unlock(&cookie->lock);
37488 radix_tree_preload_end();
37489 kfree(op);
37490 - fscache_stat(&fscache_n_stores_ok);
37491 + fscache_stat_unchecked(&fscache_n_stores_ok);
37492 _leave(" = 0");
37493 return 0;
37494
37495 @@ -864,14 +864,14 @@ nobufs:
37496 spin_unlock(&cookie->lock);
37497 radix_tree_preload_end();
37498 kfree(op);
37499 - fscache_stat(&fscache_n_stores_nobufs);
37500 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37501 _leave(" = -ENOBUFS");
37502 return -ENOBUFS;
37503
37504 nomem_free:
37505 kfree(op);
37506 nomem:
37507 - fscache_stat(&fscache_n_stores_oom);
37508 + fscache_stat_unchecked(&fscache_n_stores_oom);
37509 _leave(" = -ENOMEM");
37510 return -ENOMEM;
37511 }
37512 @@ -889,7 +889,7 @@ void __fscache_uncache_page(struct fscac
37513 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37514 ASSERTCMP(page, !=, NULL);
37515
37516 - fscache_stat(&fscache_n_uncaches);
37517 + fscache_stat_unchecked(&fscache_n_uncaches);
37518
37519 /* cache withdrawal may beat us to it */
37520 if (!PageFsCache(page))
37521 @@ -942,7 +942,7 @@ void fscache_mark_pages_cached(struct fs
37522 unsigned long loop;
37523
37524 #ifdef CONFIG_FSCACHE_STATS
37525 - atomic_add(pagevec->nr, &fscache_n_marks);
37526 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37527 #endif
37528
37529 for (loop = 0; loop < pagevec->nr; loop++) {
37530 diff -urNp linux-2.6.39.4/fs/fscache/stats.c linux-2.6.39.4/fs/fscache/stats.c
37531 --- linux-2.6.39.4/fs/fscache/stats.c 2011-05-19 00:06:34.000000000 -0400
37532 +++ linux-2.6.39.4/fs/fscache/stats.c 2011-08-05 19:44:37.000000000 -0400
37533 @@ -18,95 +18,95 @@
37534 /*
37535 * operation counters
37536 */
37537 -atomic_t fscache_n_op_pend;
37538 -atomic_t fscache_n_op_run;
37539 -atomic_t fscache_n_op_enqueue;
37540 -atomic_t fscache_n_op_requeue;
37541 -atomic_t fscache_n_op_deferred_release;
37542 -atomic_t fscache_n_op_release;
37543 -atomic_t fscache_n_op_gc;
37544 -atomic_t fscache_n_op_cancelled;
37545 -atomic_t fscache_n_op_rejected;
37546 -
37547 -atomic_t fscache_n_attr_changed;
37548 -atomic_t fscache_n_attr_changed_ok;
37549 -atomic_t fscache_n_attr_changed_nobufs;
37550 -atomic_t fscache_n_attr_changed_nomem;
37551 -atomic_t fscache_n_attr_changed_calls;
37552 -
37553 -atomic_t fscache_n_allocs;
37554 -atomic_t fscache_n_allocs_ok;
37555 -atomic_t fscache_n_allocs_wait;
37556 -atomic_t fscache_n_allocs_nobufs;
37557 -atomic_t fscache_n_allocs_intr;
37558 -atomic_t fscache_n_allocs_object_dead;
37559 -atomic_t fscache_n_alloc_ops;
37560 -atomic_t fscache_n_alloc_op_waits;
37561 -
37562 -atomic_t fscache_n_retrievals;
37563 -atomic_t fscache_n_retrievals_ok;
37564 -atomic_t fscache_n_retrievals_wait;
37565 -atomic_t fscache_n_retrievals_nodata;
37566 -atomic_t fscache_n_retrievals_nobufs;
37567 -atomic_t fscache_n_retrievals_intr;
37568 -atomic_t fscache_n_retrievals_nomem;
37569 -atomic_t fscache_n_retrievals_object_dead;
37570 -atomic_t fscache_n_retrieval_ops;
37571 -atomic_t fscache_n_retrieval_op_waits;
37572 -
37573 -atomic_t fscache_n_stores;
37574 -atomic_t fscache_n_stores_ok;
37575 -atomic_t fscache_n_stores_again;
37576 -atomic_t fscache_n_stores_nobufs;
37577 -atomic_t fscache_n_stores_oom;
37578 -atomic_t fscache_n_store_ops;
37579 -atomic_t fscache_n_store_calls;
37580 -atomic_t fscache_n_store_pages;
37581 -atomic_t fscache_n_store_radix_deletes;
37582 -atomic_t fscache_n_store_pages_over_limit;
37583 -
37584 -atomic_t fscache_n_store_vmscan_not_storing;
37585 -atomic_t fscache_n_store_vmscan_gone;
37586 -atomic_t fscache_n_store_vmscan_busy;
37587 -atomic_t fscache_n_store_vmscan_cancelled;
37588 -
37589 -atomic_t fscache_n_marks;
37590 -atomic_t fscache_n_uncaches;
37591 -
37592 -atomic_t fscache_n_acquires;
37593 -atomic_t fscache_n_acquires_null;
37594 -atomic_t fscache_n_acquires_no_cache;
37595 -atomic_t fscache_n_acquires_ok;
37596 -atomic_t fscache_n_acquires_nobufs;
37597 -atomic_t fscache_n_acquires_oom;
37598 -
37599 -atomic_t fscache_n_updates;
37600 -atomic_t fscache_n_updates_null;
37601 -atomic_t fscache_n_updates_run;
37602 -
37603 -atomic_t fscache_n_relinquishes;
37604 -atomic_t fscache_n_relinquishes_null;
37605 -atomic_t fscache_n_relinquishes_waitcrt;
37606 -atomic_t fscache_n_relinquishes_retire;
37607 -
37608 -atomic_t fscache_n_cookie_index;
37609 -atomic_t fscache_n_cookie_data;
37610 -atomic_t fscache_n_cookie_special;
37611 -
37612 -atomic_t fscache_n_object_alloc;
37613 -atomic_t fscache_n_object_no_alloc;
37614 -atomic_t fscache_n_object_lookups;
37615 -atomic_t fscache_n_object_lookups_negative;
37616 -atomic_t fscache_n_object_lookups_positive;
37617 -atomic_t fscache_n_object_lookups_timed_out;
37618 -atomic_t fscache_n_object_created;
37619 -atomic_t fscache_n_object_avail;
37620 -atomic_t fscache_n_object_dead;
37621 -
37622 -atomic_t fscache_n_checkaux_none;
37623 -atomic_t fscache_n_checkaux_okay;
37624 -atomic_t fscache_n_checkaux_update;
37625 -atomic_t fscache_n_checkaux_obsolete;
37626 +atomic_unchecked_t fscache_n_op_pend;
37627 +atomic_unchecked_t fscache_n_op_run;
37628 +atomic_unchecked_t fscache_n_op_enqueue;
37629 +atomic_unchecked_t fscache_n_op_requeue;
37630 +atomic_unchecked_t fscache_n_op_deferred_release;
37631 +atomic_unchecked_t fscache_n_op_release;
37632 +atomic_unchecked_t fscache_n_op_gc;
37633 +atomic_unchecked_t fscache_n_op_cancelled;
37634 +atomic_unchecked_t fscache_n_op_rejected;
37635 +
37636 +atomic_unchecked_t fscache_n_attr_changed;
37637 +atomic_unchecked_t fscache_n_attr_changed_ok;
37638 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37639 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37640 +atomic_unchecked_t fscache_n_attr_changed_calls;
37641 +
37642 +atomic_unchecked_t fscache_n_allocs;
37643 +atomic_unchecked_t fscache_n_allocs_ok;
37644 +atomic_unchecked_t fscache_n_allocs_wait;
37645 +atomic_unchecked_t fscache_n_allocs_nobufs;
37646 +atomic_unchecked_t fscache_n_allocs_intr;
37647 +atomic_unchecked_t fscache_n_allocs_object_dead;
37648 +atomic_unchecked_t fscache_n_alloc_ops;
37649 +atomic_unchecked_t fscache_n_alloc_op_waits;
37650 +
37651 +atomic_unchecked_t fscache_n_retrievals;
37652 +atomic_unchecked_t fscache_n_retrievals_ok;
37653 +atomic_unchecked_t fscache_n_retrievals_wait;
37654 +atomic_unchecked_t fscache_n_retrievals_nodata;
37655 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37656 +atomic_unchecked_t fscache_n_retrievals_intr;
37657 +atomic_unchecked_t fscache_n_retrievals_nomem;
37658 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37659 +atomic_unchecked_t fscache_n_retrieval_ops;
37660 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37661 +
37662 +atomic_unchecked_t fscache_n_stores;
37663 +atomic_unchecked_t fscache_n_stores_ok;
37664 +atomic_unchecked_t fscache_n_stores_again;
37665 +atomic_unchecked_t fscache_n_stores_nobufs;
37666 +atomic_unchecked_t fscache_n_stores_oom;
37667 +atomic_unchecked_t fscache_n_store_ops;
37668 +atomic_unchecked_t fscache_n_store_calls;
37669 +atomic_unchecked_t fscache_n_store_pages;
37670 +atomic_unchecked_t fscache_n_store_radix_deletes;
37671 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37672 +
37673 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37674 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37675 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37676 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37677 +
37678 +atomic_unchecked_t fscache_n_marks;
37679 +atomic_unchecked_t fscache_n_uncaches;
37680 +
37681 +atomic_unchecked_t fscache_n_acquires;
37682 +atomic_unchecked_t fscache_n_acquires_null;
37683 +atomic_unchecked_t fscache_n_acquires_no_cache;
37684 +atomic_unchecked_t fscache_n_acquires_ok;
37685 +atomic_unchecked_t fscache_n_acquires_nobufs;
37686 +atomic_unchecked_t fscache_n_acquires_oom;
37687 +
37688 +atomic_unchecked_t fscache_n_updates;
37689 +atomic_unchecked_t fscache_n_updates_null;
37690 +atomic_unchecked_t fscache_n_updates_run;
37691 +
37692 +atomic_unchecked_t fscache_n_relinquishes;
37693 +atomic_unchecked_t fscache_n_relinquishes_null;
37694 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37695 +atomic_unchecked_t fscache_n_relinquishes_retire;
37696 +
37697 +atomic_unchecked_t fscache_n_cookie_index;
37698 +atomic_unchecked_t fscache_n_cookie_data;
37699 +atomic_unchecked_t fscache_n_cookie_special;
37700 +
37701 +atomic_unchecked_t fscache_n_object_alloc;
37702 +atomic_unchecked_t fscache_n_object_no_alloc;
37703 +atomic_unchecked_t fscache_n_object_lookups;
37704 +atomic_unchecked_t fscache_n_object_lookups_negative;
37705 +atomic_unchecked_t fscache_n_object_lookups_positive;
37706 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37707 +atomic_unchecked_t fscache_n_object_created;
37708 +atomic_unchecked_t fscache_n_object_avail;
37709 +atomic_unchecked_t fscache_n_object_dead;
37710 +
37711 +atomic_unchecked_t fscache_n_checkaux_none;
37712 +atomic_unchecked_t fscache_n_checkaux_okay;
37713 +atomic_unchecked_t fscache_n_checkaux_update;
37714 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37715
37716 atomic_t fscache_n_cop_alloc_object;
37717 atomic_t fscache_n_cop_lookup_object;
37718 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37719 seq_puts(m, "FS-Cache statistics\n");
37720
37721 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37722 - atomic_read(&fscache_n_cookie_index),
37723 - atomic_read(&fscache_n_cookie_data),
37724 - atomic_read(&fscache_n_cookie_special));
37725 + atomic_read_unchecked(&fscache_n_cookie_index),
37726 + atomic_read_unchecked(&fscache_n_cookie_data),
37727 + atomic_read_unchecked(&fscache_n_cookie_special));
37728
37729 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37730 - atomic_read(&fscache_n_object_alloc),
37731 - atomic_read(&fscache_n_object_no_alloc),
37732 - atomic_read(&fscache_n_object_avail),
37733 - atomic_read(&fscache_n_object_dead));
37734 + atomic_read_unchecked(&fscache_n_object_alloc),
37735 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37736 + atomic_read_unchecked(&fscache_n_object_avail),
37737 + atomic_read_unchecked(&fscache_n_object_dead));
37738 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37739 - atomic_read(&fscache_n_checkaux_none),
37740 - atomic_read(&fscache_n_checkaux_okay),
37741 - atomic_read(&fscache_n_checkaux_update),
37742 - atomic_read(&fscache_n_checkaux_obsolete));
37743 + atomic_read_unchecked(&fscache_n_checkaux_none),
37744 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37745 + atomic_read_unchecked(&fscache_n_checkaux_update),
37746 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37747
37748 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37749 - atomic_read(&fscache_n_marks),
37750 - atomic_read(&fscache_n_uncaches));
37751 + atomic_read_unchecked(&fscache_n_marks),
37752 + atomic_read_unchecked(&fscache_n_uncaches));
37753
37754 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37755 " oom=%u\n",
37756 - atomic_read(&fscache_n_acquires),
37757 - atomic_read(&fscache_n_acquires_null),
37758 - atomic_read(&fscache_n_acquires_no_cache),
37759 - atomic_read(&fscache_n_acquires_ok),
37760 - atomic_read(&fscache_n_acquires_nobufs),
37761 - atomic_read(&fscache_n_acquires_oom));
37762 + atomic_read_unchecked(&fscache_n_acquires),
37763 + atomic_read_unchecked(&fscache_n_acquires_null),
37764 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37765 + atomic_read_unchecked(&fscache_n_acquires_ok),
37766 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37767 + atomic_read_unchecked(&fscache_n_acquires_oom));
37768
37769 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37770 - atomic_read(&fscache_n_object_lookups),
37771 - atomic_read(&fscache_n_object_lookups_negative),
37772 - atomic_read(&fscache_n_object_lookups_positive),
37773 - atomic_read(&fscache_n_object_created),
37774 - atomic_read(&fscache_n_object_lookups_timed_out));
37775 + atomic_read_unchecked(&fscache_n_object_lookups),
37776 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37777 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37778 + atomic_read_unchecked(&fscache_n_object_created),
37779 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37780
37781 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37782 - atomic_read(&fscache_n_updates),
37783 - atomic_read(&fscache_n_updates_null),
37784 - atomic_read(&fscache_n_updates_run));
37785 + atomic_read_unchecked(&fscache_n_updates),
37786 + atomic_read_unchecked(&fscache_n_updates_null),
37787 + atomic_read_unchecked(&fscache_n_updates_run));
37788
37789 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37790 - atomic_read(&fscache_n_relinquishes),
37791 - atomic_read(&fscache_n_relinquishes_null),
37792 - atomic_read(&fscache_n_relinquishes_waitcrt),
37793 - atomic_read(&fscache_n_relinquishes_retire));
37794 + atomic_read_unchecked(&fscache_n_relinquishes),
37795 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37796 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37797 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37798
37799 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37800 - atomic_read(&fscache_n_attr_changed),
37801 - atomic_read(&fscache_n_attr_changed_ok),
37802 - atomic_read(&fscache_n_attr_changed_nobufs),
37803 - atomic_read(&fscache_n_attr_changed_nomem),
37804 - atomic_read(&fscache_n_attr_changed_calls));
37805 + atomic_read_unchecked(&fscache_n_attr_changed),
37806 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37807 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37808 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37809 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37810
37811 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37812 - atomic_read(&fscache_n_allocs),
37813 - atomic_read(&fscache_n_allocs_ok),
37814 - atomic_read(&fscache_n_allocs_wait),
37815 - atomic_read(&fscache_n_allocs_nobufs),
37816 - atomic_read(&fscache_n_allocs_intr));
37817 + atomic_read_unchecked(&fscache_n_allocs),
37818 + atomic_read_unchecked(&fscache_n_allocs_ok),
37819 + atomic_read_unchecked(&fscache_n_allocs_wait),
37820 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37821 + atomic_read_unchecked(&fscache_n_allocs_intr));
37822 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37823 - atomic_read(&fscache_n_alloc_ops),
37824 - atomic_read(&fscache_n_alloc_op_waits),
37825 - atomic_read(&fscache_n_allocs_object_dead));
37826 + atomic_read_unchecked(&fscache_n_alloc_ops),
37827 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37828 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37829
37830 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37831 " int=%u oom=%u\n",
37832 - atomic_read(&fscache_n_retrievals),
37833 - atomic_read(&fscache_n_retrievals_ok),
37834 - atomic_read(&fscache_n_retrievals_wait),
37835 - atomic_read(&fscache_n_retrievals_nodata),
37836 - atomic_read(&fscache_n_retrievals_nobufs),
37837 - atomic_read(&fscache_n_retrievals_intr),
37838 - atomic_read(&fscache_n_retrievals_nomem));
37839 + atomic_read_unchecked(&fscache_n_retrievals),
37840 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37841 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37842 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37843 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37844 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37845 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37846 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37847 - atomic_read(&fscache_n_retrieval_ops),
37848 - atomic_read(&fscache_n_retrieval_op_waits),
37849 - atomic_read(&fscache_n_retrievals_object_dead));
37850 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37851 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37852 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37853
37854 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37855 - atomic_read(&fscache_n_stores),
37856 - atomic_read(&fscache_n_stores_ok),
37857 - atomic_read(&fscache_n_stores_again),
37858 - atomic_read(&fscache_n_stores_nobufs),
37859 - atomic_read(&fscache_n_stores_oom));
37860 + atomic_read_unchecked(&fscache_n_stores),
37861 + atomic_read_unchecked(&fscache_n_stores_ok),
37862 + atomic_read_unchecked(&fscache_n_stores_again),
37863 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37864 + atomic_read_unchecked(&fscache_n_stores_oom));
37865 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37866 - atomic_read(&fscache_n_store_ops),
37867 - atomic_read(&fscache_n_store_calls),
37868 - atomic_read(&fscache_n_store_pages),
37869 - atomic_read(&fscache_n_store_radix_deletes),
37870 - atomic_read(&fscache_n_store_pages_over_limit));
37871 + atomic_read_unchecked(&fscache_n_store_ops),
37872 + atomic_read_unchecked(&fscache_n_store_calls),
37873 + atomic_read_unchecked(&fscache_n_store_pages),
37874 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37875 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37876
37877 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37878 - atomic_read(&fscache_n_store_vmscan_not_storing),
37879 - atomic_read(&fscache_n_store_vmscan_gone),
37880 - atomic_read(&fscache_n_store_vmscan_busy),
37881 - atomic_read(&fscache_n_store_vmscan_cancelled));
37882 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37883 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37884 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37885 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37886
37887 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37888 - atomic_read(&fscache_n_op_pend),
37889 - atomic_read(&fscache_n_op_run),
37890 - atomic_read(&fscache_n_op_enqueue),
37891 - atomic_read(&fscache_n_op_cancelled),
37892 - atomic_read(&fscache_n_op_rejected));
37893 + atomic_read_unchecked(&fscache_n_op_pend),
37894 + atomic_read_unchecked(&fscache_n_op_run),
37895 + atomic_read_unchecked(&fscache_n_op_enqueue),
37896 + atomic_read_unchecked(&fscache_n_op_cancelled),
37897 + atomic_read_unchecked(&fscache_n_op_rejected));
37898 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37899 - atomic_read(&fscache_n_op_deferred_release),
37900 - atomic_read(&fscache_n_op_release),
37901 - atomic_read(&fscache_n_op_gc));
37902 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37903 + atomic_read_unchecked(&fscache_n_op_release),
37904 + atomic_read_unchecked(&fscache_n_op_gc));
37905
37906 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37907 atomic_read(&fscache_n_cop_alloc_object),
37908 diff -urNp linux-2.6.39.4/fs/fs_struct.c linux-2.6.39.4/fs/fs_struct.c
37909 --- linux-2.6.39.4/fs/fs_struct.c 2011-05-19 00:06:34.000000000 -0400
37910 +++ linux-2.6.39.4/fs/fs_struct.c 2011-08-05 19:44:37.000000000 -0400
37911 @@ -4,6 +4,7 @@
37912 #include <linux/path.h>
37913 #include <linux/slab.h>
37914 #include <linux/fs_struct.h>
37915 +#include <linux/grsecurity.h>
37916 #include "internal.h"
37917
37918 static inline void path_get_longterm(struct path *path)
37919 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37920 old_root = fs->root;
37921 fs->root = *path;
37922 path_get_longterm(path);
37923 + gr_set_chroot_entries(current, path);
37924 write_seqcount_end(&fs->seq);
37925 spin_unlock(&fs->lock);
37926 if (old_root.dentry)
37927 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37928 && fs->root.mnt == old_root->mnt) {
37929 path_get_longterm(new_root);
37930 fs->root = *new_root;
37931 + gr_set_chroot_entries(p, new_root);
37932 count++;
37933 }
37934 if (fs->pwd.dentry == old_root->dentry
37935 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
37936 spin_lock(&fs->lock);
37937 write_seqcount_begin(&fs->seq);
37938 tsk->fs = NULL;
37939 - kill = !--fs->users;
37940 + gr_clear_chroot_entries(tsk);
37941 + kill = !atomic_dec_return(&fs->users);
37942 write_seqcount_end(&fs->seq);
37943 spin_unlock(&fs->lock);
37944 task_unlock(tsk);
37945 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
37946 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
37947 /* We don't need to lock fs - think why ;-) */
37948 if (fs) {
37949 - fs->users = 1;
37950 + atomic_set(&fs->users, 1);
37951 fs->in_exec = 0;
37952 spin_lock_init(&fs->lock);
37953 seqcount_init(&fs->seq);
37954 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
37955 spin_lock(&old->lock);
37956 fs->root = old->root;
37957 path_get_longterm(&fs->root);
37958 + /* instead of calling gr_set_chroot_entries here,
37959 + we call it from every caller of this function
37960 + */
37961 fs->pwd = old->pwd;
37962 path_get_longterm(&fs->pwd);
37963 spin_unlock(&old->lock);
37964 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
37965
37966 task_lock(current);
37967 spin_lock(&fs->lock);
37968 - kill = !--fs->users;
37969 + kill = !atomic_dec_return(&fs->users);
37970 current->fs = new_fs;
37971 + gr_set_chroot_entries(current, &new_fs->root);
37972 spin_unlock(&fs->lock);
37973 task_unlock(current);
37974
37975 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
37976
37977 /* to be mentioned only in INIT_TASK */
37978 struct fs_struct init_fs = {
37979 - .users = 1,
37980 + .users = ATOMIC_INIT(1),
37981 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
37982 .seq = SEQCNT_ZERO,
37983 .umask = 0022,
37984 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
37985 task_lock(current);
37986
37987 spin_lock(&init_fs.lock);
37988 - init_fs.users++;
37989 + atomic_inc(&init_fs.users);
37990 spin_unlock(&init_fs.lock);
37991
37992 spin_lock(&fs->lock);
37993 current->fs = &init_fs;
37994 - kill = !--fs->users;
37995 + gr_set_chroot_entries(current, &current->fs->root);
37996 + kill = !atomic_dec_return(&fs->users);
37997 spin_unlock(&fs->lock);
37998
37999 task_unlock(current);
38000 diff -urNp linux-2.6.39.4/fs/fuse/cuse.c linux-2.6.39.4/fs/fuse/cuse.c
38001 --- linux-2.6.39.4/fs/fuse/cuse.c 2011-05-19 00:06:34.000000000 -0400
38002 +++ linux-2.6.39.4/fs/fuse/cuse.c 2011-08-05 20:34:06.000000000 -0400
38003 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
38004 INIT_LIST_HEAD(&cuse_conntbl[i]);
38005
38006 /* inherit and extend fuse_dev_operations */
38007 - cuse_channel_fops = fuse_dev_operations;
38008 - cuse_channel_fops.owner = THIS_MODULE;
38009 - cuse_channel_fops.open = cuse_channel_open;
38010 - cuse_channel_fops.release = cuse_channel_release;
38011 + pax_open_kernel();
38012 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
38013 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
38014 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
38015 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
38016 + pax_close_kernel();
38017
38018 cuse_class = class_create(THIS_MODULE, "cuse");
38019 if (IS_ERR(cuse_class))
38020 diff -urNp linux-2.6.39.4/fs/fuse/dev.c linux-2.6.39.4/fs/fuse/dev.c
38021 --- linux-2.6.39.4/fs/fuse/dev.c 2011-05-19 00:06:34.000000000 -0400
38022 +++ linux-2.6.39.4/fs/fuse/dev.c 2011-08-05 20:34:06.000000000 -0400
38023 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
38024 ret = 0;
38025 pipe_lock(pipe);
38026
38027 - if (!pipe->readers) {
38028 + if (!atomic_read(&pipe->readers)) {
38029 send_sig(SIGPIPE, current, 0);
38030 if (!ret)
38031 ret = -EPIPE;
38032 diff -urNp linux-2.6.39.4/fs/fuse/dir.c linux-2.6.39.4/fs/fuse/dir.c
38033 --- linux-2.6.39.4/fs/fuse/dir.c 2011-05-19 00:06:34.000000000 -0400
38034 +++ linux-2.6.39.4/fs/fuse/dir.c 2011-08-05 19:44:37.000000000 -0400
38035 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
38036 return link;
38037 }
38038
38039 -static void free_link(char *link)
38040 +static void free_link(const char *link)
38041 {
38042 if (!IS_ERR(link))
38043 free_page((unsigned long) link);
38044 diff -urNp linux-2.6.39.4/fs/gfs2/ops_inode.c linux-2.6.39.4/fs/gfs2/ops_inode.c
38045 --- linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-05-19 00:06:34.000000000 -0400
38046 +++ linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-08-05 19:44:37.000000000 -0400
38047 @@ -740,6 +740,8 @@ static int gfs2_rename(struct inode *odi
38048 unsigned int x;
38049 int error;
38050
38051 + pax_track_stack();
38052 +
38053 if (ndentry->d_inode) {
38054 nip = GFS2_I(ndentry->d_inode);
38055 if (ip == nip)
38056 @@ -1019,7 +1021,7 @@ out:
38057
38058 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38059 {
38060 - char *s = nd_get_link(nd);
38061 + const char *s = nd_get_link(nd);
38062 if (!IS_ERR(s))
38063 kfree(s);
38064 }
38065 diff -urNp linux-2.6.39.4/fs/hfsplus/catalog.c linux-2.6.39.4/fs/hfsplus/catalog.c
38066 --- linux-2.6.39.4/fs/hfsplus/catalog.c 2011-05-19 00:06:34.000000000 -0400
38067 +++ linux-2.6.39.4/fs/hfsplus/catalog.c 2011-08-05 19:44:37.000000000 -0400
38068 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38069 int err;
38070 u16 type;
38071
38072 + pax_track_stack();
38073 +
38074 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38075 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38076 if (err)
38077 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38078 int entry_size;
38079 int err;
38080
38081 + pax_track_stack();
38082 +
38083 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38084 str->name, cnid, inode->i_nlink);
38085 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38086 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38087 int entry_size, type;
38088 int err = 0;
38089
38090 + pax_track_stack();
38091 +
38092 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38093 cnid, src_dir->i_ino, src_name->name,
38094 dst_dir->i_ino, dst_name->name);
38095 diff -urNp linux-2.6.39.4/fs/hfsplus/dir.c linux-2.6.39.4/fs/hfsplus/dir.c
38096 --- linux-2.6.39.4/fs/hfsplus/dir.c 2011-05-19 00:06:34.000000000 -0400
38097 +++ linux-2.6.39.4/fs/hfsplus/dir.c 2011-08-05 19:44:37.000000000 -0400
38098 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38099 struct hfsplus_readdir_data *rd;
38100 u16 type;
38101
38102 + pax_track_stack();
38103 +
38104 if (filp->f_pos >= inode->i_size)
38105 return 0;
38106
38107 diff -urNp linux-2.6.39.4/fs/hfsplus/inode.c linux-2.6.39.4/fs/hfsplus/inode.c
38108 --- linux-2.6.39.4/fs/hfsplus/inode.c 2011-05-19 00:06:34.000000000 -0400
38109 +++ linux-2.6.39.4/fs/hfsplus/inode.c 2011-08-05 19:44:37.000000000 -0400
38110 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38111 int res = 0;
38112 u16 type;
38113
38114 + pax_track_stack();
38115 +
38116 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38117
38118 HFSPLUS_I(inode)->linkid = 0;
38119 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38120 struct hfs_find_data fd;
38121 hfsplus_cat_entry entry;
38122
38123 + pax_track_stack();
38124 +
38125 if (HFSPLUS_IS_RSRC(inode))
38126 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38127
38128 diff -urNp linux-2.6.39.4/fs/hfsplus/ioctl.c linux-2.6.39.4/fs/hfsplus/ioctl.c
38129 --- linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-05-19 00:06:34.000000000 -0400
38130 +++ linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-08-05 19:44:37.000000000 -0400
38131 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38132 struct hfsplus_cat_file *file;
38133 int res;
38134
38135 + pax_track_stack();
38136 +
38137 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38138 return -EOPNOTSUPP;
38139
38140 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38141 struct hfsplus_cat_file *file;
38142 ssize_t res = 0;
38143
38144 + pax_track_stack();
38145 +
38146 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38147 return -EOPNOTSUPP;
38148
38149 diff -urNp linux-2.6.39.4/fs/hfsplus/super.c linux-2.6.39.4/fs/hfsplus/super.c
38150 --- linux-2.6.39.4/fs/hfsplus/super.c 2011-05-19 00:06:34.000000000 -0400
38151 +++ linux-2.6.39.4/fs/hfsplus/super.c 2011-08-05 19:44:37.000000000 -0400
38152 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38153 struct nls_table *nls = NULL;
38154 int err;
38155
38156 + pax_track_stack();
38157 +
38158 err = -EINVAL;
38159 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38160 if (!sbi)
38161 diff -urNp linux-2.6.39.4/fs/hugetlbfs/inode.c linux-2.6.39.4/fs/hugetlbfs/inode.c
38162 --- linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38163 +++ linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38164 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38165 .kill_sb = kill_litter_super,
38166 };
38167
38168 -static struct vfsmount *hugetlbfs_vfsmount;
38169 +struct vfsmount *hugetlbfs_vfsmount;
38170
38171 static int can_do_hugetlb_shm(void)
38172 {
38173 diff -urNp linux-2.6.39.4/fs/inode.c linux-2.6.39.4/fs/inode.c
38174 --- linux-2.6.39.4/fs/inode.c 2011-05-19 00:06:34.000000000 -0400
38175 +++ linux-2.6.39.4/fs/inode.c 2011-08-05 19:44:37.000000000 -0400
38176 @@ -862,8 +862,8 @@ unsigned int get_next_ino(void)
38177
38178 #ifdef CONFIG_SMP
38179 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38180 - static atomic_t shared_last_ino;
38181 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38182 + static atomic_unchecked_t shared_last_ino;
38183 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38184
38185 res = next - LAST_INO_BATCH;
38186 }
38187 diff -urNp linux-2.6.39.4/fs/jbd/checkpoint.c linux-2.6.39.4/fs/jbd/checkpoint.c
38188 --- linux-2.6.39.4/fs/jbd/checkpoint.c 2011-05-19 00:06:34.000000000 -0400
38189 +++ linux-2.6.39.4/fs/jbd/checkpoint.c 2011-08-05 19:44:37.000000000 -0400
38190 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38191 tid_t this_tid;
38192 int result;
38193
38194 + pax_track_stack();
38195 +
38196 jbd_debug(1, "Start checkpoint\n");
38197
38198 /*
38199 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rtime.c linux-2.6.39.4/fs/jffs2/compr_rtime.c
38200 --- linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-05-19 00:06:34.000000000 -0400
38201 +++ linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-08-05 19:44:37.000000000 -0400
38202 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38203 int outpos = 0;
38204 int pos=0;
38205
38206 + pax_track_stack();
38207 +
38208 memset(positions,0,sizeof(positions));
38209
38210 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38211 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38212 int outpos = 0;
38213 int pos=0;
38214
38215 + pax_track_stack();
38216 +
38217 memset(positions,0,sizeof(positions));
38218
38219 while (outpos<destlen) {
38220 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rubin.c linux-2.6.39.4/fs/jffs2/compr_rubin.c
38221 --- linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-05-19 00:06:34.000000000 -0400
38222 +++ linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-08-05 19:44:37.000000000 -0400
38223 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38224 int ret;
38225 uint32_t mysrclen, mydstlen;
38226
38227 + pax_track_stack();
38228 +
38229 mysrclen = *sourcelen;
38230 mydstlen = *dstlen - 8;
38231
38232 diff -urNp linux-2.6.39.4/fs/jffs2/erase.c linux-2.6.39.4/fs/jffs2/erase.c
38233 --- linux-2.6.39.4/fs/jffs2/erase.c 2011-05-19 00:06:34.000000000 -0400
38234 +++ linux-2.6.39.4/fs/jffs2/erase.c 2011-08-05 19:44:37.000000000 -0400
38235 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38236 struct jffs2_unknown_node marker = {
38237 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38238 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38239 - .totlen = cpu_to_je32(c->cleanmarker_size)
38240 + .totlen = cpu_to_je32(c->cleanmarker_size),
38241 + .hdr_crc = cpu_to_je32(0)
38242 };
38243
38244 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38245 diff -urNp linux-2.6.39.4/fs/jffs2/wbuf.c linux-2.6.39.4/fs/jffs2/wbuf.c
38246 --- linux-2.6.39.4/fs/jffs2/wbuf.c 2011-05-19 00:06:34.000000000 -0400
38247 +++ linux-2.6.39.4/fs/jffs2/wbuf.c 2011-08-05 19:44:37.000000000 -0400
38248 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38249 {
38250 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38251 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38252 - .totlen = constant_cpu_to_je32(8)
38253 + .totlen = constant_cpu_to_je32(8),
38254 + .hdr_crc = constant_cpu_to_je32(0)
38255 };
38256
38257 /*
38258 diff -urNp linux-2.6.39.4/fs/jffs2/xattr.c linux-2.6.39.4/fs/jffs2/xattr.c
38259 --- linux-2.6.39.4/fs/jffs2/xattr.c 2011-05-19 00:06:34.000000000 -0400
38260 +++ linux-2.6.39.4/fs/jffs2/xattr.c 2011-08-05 19:44:37.000000000 -0400
38261 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38262
38263 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38264
38265 + pax_track_stack();
38266 +
38267 /* Phase.1 : Merge same xref */
38268 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38269 xref_tmphash[i] = NULL;
38270 diff -urNp linux-2.6.39.4/fs/jfs/super.c linux-2.6.39.4/fs/jfs/super.c
38271 --- linux-2.6.39.4/fs/jfs/super.c 2011-05-19 00:06:34.000000000 -0400
38272 +++ linux-2.6.39.4/fs/jfs/super.c 2011-08-05 19:44:37.000000000 -0400
38273 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38274
38275 jfs_inode_cachep =
38276 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38277 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38278 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38279 init_once);
38280 if (jfs_inode_cachep == NULL)
38281 return -ENOMEM;
38282 diff -urNp linux-2.6.39.4/fs/Kconfig.binfmt linux-2.6.39.4/fs/Kconfig.binfmt
38283 --- linux-2.6.39.4/fs/Kconfig.binfmt 2011-05-19 00:06:34.000000000 -0400
38284 +++ linux-2.6.39.4/fs/Kconfig.binfmt 2011-08-05 19:44:37.000000000 -0400
38285 @@ -86,7 +86,7 @@ config HAVE_AOUT
38286
38287 config BINFMT_AOUT
38288 tristate "Kernel support for a.out and ECOFF binaries"
38289 - depends on HAVE_AOUT
38290 + depends on HAVE_AOUT && BROKEN
38291 ---help---
38292 A.out (Assembler.OUTput) is a set of formats for libraries and
38293 executables used in the earliest versions of UNIX. Linux used
38294 diff -urNp linux-2.6.39.4/fs/libfs.c linux-2.6.39.4/fs/libfs.c
38295 --- linux-2.6.39.4/fs/libfs.c 2011-05-19 00:06:34.000000000 -0400
38296 +++ linux-2.6.39.4/fs/libfs.c 2011-08-05 19:44:37.000000000 -0400
38297 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38298
38299 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38300 struct dentry *next;
38301 + char d_name[sizeof(next->d_iname)];
38302 + const unsigned char *name;
38303 +
38304 next = list_entry(p, struct dentry, d_u.d_child);
38305 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38306 if (!simple_positive(next)) {
38307 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38308
38309 spin_unlock(&next->d_lock);
38310 spin_unlock(&dentry->d_lock);
38311 - if (filldir(dirent, next->d_name.name,
38312 + name = next->d_name.name;
38313 + if (name == next->d_iname) {
38314 + memcpy(d_name, name, next->d_name.len);
38315 + name = d_name;
38316 + }
38317 + if (filldir(dirent, name,
38318 next->d_name.len, filp->f_pos,
38319 next->d_inode->i_ino,
38320 dt_type(next->d_inode)) < 0)
38321 diff -urNp linux-2.6.39.4/fs/lockd/clntproc.c linux-2.6.39.4/fs/lockd/clntproc.c
38322 --- linux-2.6.39.4/fs/lockd/clntproc.c 2011-07-09 09:18:51.000000000 -0400
38323 +++ linux-2.6.39.4/fs/lockd/clntproc.c 2011-08-05 19:44:37.000000000 -0400
38324 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38325 /*
38326 * Cookie counter for NLM requests
38327 */
38328 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38329 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38330
38331 void nlmclnt_next_cookie(struct nlm_cookie *c)
38332 {
38333 - u32 cookie = atomic_inc_return(&nlm_cookie);
38334 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38335
38336 memcpy(c->data, &cookie, 4);
38337 c->len=4;
38338 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38339 struct nlm_rqst reqst, *req;
38340 int status;
38341
38342 + pax_track_stack();
38343 +
38344 req = &reqst;
38345 memset(req, 0, sizeof(*req));
38346 locks_init_lock(&req->a_args.lock.fl);
38347 diff -urNp linux-2.6.39.4/fs/locks.c linux-2.6.39.4/fs/locks.c
38348 --- linux-2.6.39.4/fs/locks.c 2011-07-09 09:18:51.000000000 -0400
38349 +++ linux-2.6.39.4/fs/locks.c 2011-08-05 19:44:37.000000000 -0400
38350 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38351 return;
38352
38353 if (filp->f_op && filp->f_op->flock) {
38354 - struct file_lock fl = {
38355 + struct file_lock flock = {
38356 .fl_pid = current->tgid,
38357 .fl_file = filp,
38358 .fl_flags = FL_FLOCK,
38359 .fl_type = F_UNLCK,
38360 .fl_end = OFFSET_MAX,
38361 };
38362 - filp->f_op->flock(filp, F_SETLKW, &fl);
38363 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38364 - fl.fl_ops->fl_release_private(&fl);
38365 + filp->f_op->flock(filp, F_SETLKW, &flock);
38366 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38367 + flock.fl_ops->fl_release_private(&flock);
38368 }
38369
38370 lock_flocks();
38371 diff -urNp linux-2.6.39.4/fs/logfs/super.c linux-2.6.39.4/fs/logfs/super.c
38372 --- linux-2.6.39.4/fs/logfs/super.c 2011-05-19 00:06:34.000000000 -0400
38373 +++ linux-2.6.39.4/fs/logfs/super.c 2011-08-05 19:44:37.000000000 -0400
38374 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38375 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38376 int err, valid0, valid1;
38377
38378 + pax_track_stack();
38379 +
38380 /* read first superblock */
38381 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38382 if (err)
38383 diff -urNp linux-2.6.39.4/fs/namei.c linux-2.6.39.4/fs/namei.c
38384 --- linux-2.6.39.4/fs/namei.c 2011-08-05 21:11:51.000000000 -0400
38385 +++ linux-2.6.39.4/fs/namei.c 2011-08-05 21:12:20.000000000 -0400
38386 @@ -237,20 +237,30 @@ int generic_permission(struct inode *ino
38387 return ret;
38388
38389 /*
38390 - * Read/write DACs are always overridable.
38391 - * Executable DACs are overridable if at least one exec bit is set.
38392 + * Searching includes executable on directories, else just read.
38393 */
38394 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38395 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38396 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38397 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38398 +#ifdef CONFIG_GRKERNSEC
38399 + if (flags & IPERM_FLAG_RCU)
38400 + return -ECHILD;
38401 +#endif
38402 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38403 return 0;
38404 + }
38405
38406 /*
38407 - * Searching includes executable on directories, else just read.
38408 + * Read/write DACs are always overridable.
38409 + * Executable DACs are overridable if at least one exec bit is set.
38410 */
38411 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38412 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38413 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38414 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38415 +#ifdef CONFIG_GRKERNSEC
38416 + if (flags & IPERM_FLAG_RCU)
38417 + return -ECHILD;
38418 +#endif
38419 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38420 return 0;
38421 + }
38422
38423 return -EACCES;
38424 }
38425 @@ -626,6 +636,9 @@ static inline int handle_reval_path(stru
38426 struct dentry *dentry = nd->path.dentry;
38427 int status;
38428
38429 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38430 + return -ENOENT;
38431 +
38432 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38433 return 0;
38434
38435 @@ -671,9 +684,16 @@ static inline int exec_permission(struct
38436 if (ret == -ECHILD)
38437 return ret;
38438
38439 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38440 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38441 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38442 goto ok;
38443 + else {
38444 +#ifdef CONFIG_GRKERNSEC
38445 + if (flags & IPERM_FLAG_RCU)
38446 + return -ECHILD;
38447 +#endif
38448 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38449 + goto ok;
38450 + }
38451
38452 return ret;
38453 ok:
38454 @@ -781,11 +801,19 @@ follow_link(struct path *link, struct na
38455 return error;
38456 }
38457
38458 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38459 + dentry->d_inode, dentry, nd->path.mnt)) {
38460 + error = -EACCES;
38461 + *p = ERR_PTR(error); /* no ->put_link(), please */
38462 + path_put(&nd->path);
38463 + return error;
38464 + }
38465 +
38466 nd->last_type = LAST_BIND;
38467 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38468 error = PTR_ERR(*p);
38469 if (!IS_ERR(*p)) {
38470 - char *s = nd_get_link(nd);
38471 + const char *s = nd_get_link(nd);
38472 error = 0;
38473 if (s)
38474 error = __vfs_follow_link(nd, s);
38475 @@ -1702,6 +1730,9 @@ static int do_path_lookup(int dfd, const
38476 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38477
38478 if (likely(!retval)) {
38479 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38480 + return -ENOENT;
38481 +
38482 if (unlikely(!audit_dummy_context())) {
38483 if (nd->path.dentry && nd->inode)
38484 audit_inode(name, nd->path.dentry);
38485 @@ -2012,6 +2043,30 @@ int vfs_create(struct inode *dir, struct
38486 return error;
38487 }
38488
38489 +/*
38490 + * Note that while the flag value (low two bits) for sys_open means:
38491 + * 00 - read-only
38492 + * 01 - write-only
38493 + * 10 - read-write
38494 + * 11 - special
38495 + * it is changed into
38496 + * 00 - no permissions needed
38497 + * 01 - read-permission
38498 + * 10 - write-permission
38499 + * 11 - read-write
38500 + * for the internal routines (ie open_namei()/follow_link() etc)
38501 + * This is more logical, and also allows the 00 "no perm needed"
38502 + * to be used for symlinks (where the permissions are checked
38503 + * later).
38504 + *
38505 +*/
38506 +static inline int open_to_namei_flags(int flag)
38507 +{
38508 + if ((flag+1) & O_ACCMODE)
38509 + flag++;
38510 + return flag;
38511 +}
38512 +
38513 static int may_open(struct path *path, int acc_mode, int flag)
38514 {
38515 struct dentry *dentry = path->dentry;
38516 @@ -2064,7 +2119,27 @@ static int may_open(struct path *path, i
38517 /*
38518 * Ensure there are no outstanding leases on the file.
38519 */
38520 - return break_lease(inode, flag);
38521 + error = break_lease(inode, flag);
38522 +
38523 + if (error)
38524 + return error;
38525 +
38526 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38527 + error = -EPERM;
38528 + goto exit;
38529 + }
38530 +
38531 + if (gr_handle_rawio(inode)) {
38532 + error = -EPERM;
38533 + goto exit;
38534 + }
38535 +
38536 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38537 + error = -EACCES;
38538 + goto exit;
38539 + }
38540 +exit:
38541 + return error;
38542 }
38543
38544 static int handle_truncate(struct file *filp)
38545 @@ -2090,30 +2165,6 @@ static int handle_truncate(struct file *
38546 }
38547
38548 /*
38549 - * Note that while the flag value (low two bits) for sys_open means:
38550 - * 00 - read-only
38551 - * 01 - write-only
38552 - * 10 - read-write
38553 - * 11 - special
38554 - * it is changed into
38555 - * 00 - no permissions needed
38556 - * 01 - read-permission
38557 - * 10 - write-permission
38558 - * 11 - read-write
38559 - * for the internal routines (ie open_namei()/follow_link() etc)
38560 - * This is more logical, and also allows the 00 "no perm needed"
38561 - * to be used for symlinks (where the permissions are checked
38562 - * later).
38563 - *
38564 -*/
38565 -static inline int open_to_namei_flags(int flag)
38566 -{
38567 - if ((flag+1) & O_ACCMODE)
38568 - flag++;
38569 - return flag;
38570 -}
38571 -
38572 -/*
38573 * Handle the last step of open()
38574 */
38575 static struct file *do_last(struct nameidata *nd, struct path *path,
38576 @@ -2122,6 +2173,7 @@ static struct file *do_last(struct namei
38577 struct dentry *dir = nd->path.dentry;
38578 struct dentry *dentry;
38579 int open_flag = op->open_flag;
38580 + int flag = open_to_namei_flags(open_flag);
38581 int will_truncate = open_flag & O_TRUNC;
38582 int want_write = 0;
38583 int acc_mode = op->acc_mode;
38584 @@ -2217,6 +2269,12 @@ static struct file *do_last(struct namei
38585 /* Negative dentry, just create the file */
38586 if (!dentry->d_inode) {
38587 int mode = op->mode;
38588 +
38589 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38590 + error = -EACCES;
38591 + goto exit_mutex_unlock;
38592 + }
38593 +
38594 if (!IS_POSIXACL(dir->d_inode))
38595 mode &= ~current_umask();
38596 /*
38597 @@ -2240,6 +2298,8 @@ static struct file *do_last(struct namei
38598 error = vfs_create(dir->d_inode, dentry, mode, nd);
38599 if (error)
38600 goto exit_mutex_unlock;
38601 + else
38602 + gr_handle_create(path->dentry, path->mnt);
38603 mutex_unlock(&dir->d_inode->i_mutex);
38604 dput(nd->path.dentry);
38605 nd->path.dentry = dentry;
38606 @@ -2249,6 +2309,14 @@ static struct file *do_last(struct namei
38607 /*
38608 * It already exists.
38609 */
38610 +
38611 + /* only check if O_CREAT is specified, all other checks need to go
38612 + into may_open */
38613 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38614 + error = -EACCES;
38615 + goto exit_mutex_unlock;
38616 + }
38617 +
38618 mutex_unlock(&dir->d_inode->i_mutex);
38619 audit_inode(pathname, path->dentry);
38620
38621 @@ -2535,6 +2603,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38622 error = may_mknod(mode);
38623 if (error)
38624 goto out_dput;
38625 +
38626 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38627 + error = -EPERM;
38628 + goto out_dput;
38629 + }
38630 +
38631 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38632 + error = -EACCES;
38633 + goto out_dput;
38634 + }
38635 +
38636 error = mnt_want_write(nd.path.mnt);
38637 if (error)
38638 goto out_dput;
38639 @@ -2555,6 +2634,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38640 }
38641 out_drop_write:
38642 mnt_drop_write(nd.path.mnt);
38643 +
38644 + if (!error)
38645 + gr_handle_create(dentry, nd.path.mnt);
38646 out_dput:
38647 dput(dentry);
38648 out_unlock:
38649 @@ -2607,6 +2689,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38650 if (IS_ERR(dentry))
38651 goto out_unlock;
38652
38653 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38654 + error = -EACCES;
38655 + goto out_dput;
38656 + }
38657 +
38658 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38659 mode &= ~current_umask();
38660 error = mnt_want_write(nd.path.mnt);
38661 @@ -2618,6 +2705,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38662 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38663 out_drop_write:
38664 mnt_drop_write(nd.path.mnt);
38665 +
38666 + if (!error)
38667 + gr_handle_create(dentry, nd.path.mnt);
38668 +
38669 out_dput:
38670 dput(dentry);
38671 out_unlock:
38672 @@ -2697,6 +2788,8 @@ static long do_rmdir(int dfd, const char
38673 char * name;
38674 struct dentry *dentry;
38675 struct nameidata nd;
38676 + ino_t saved_ino = 0;
38677 + dev_t saved_dev = 0;
38678
38679 error = user_path_parent(dfd, pathname, &nd, &name);
38680 if (error)
38681 @@ -2721,6 +2814,19 @@ static long do_rmdir(int dfd, const char
38682 error = PTR_ERR(dentry);
38683 if (IS_ERR(dentry))
38684 goto exit2;
38685 +
38686 + if (dentry->d_inode != NULL) {
38687 + if (dentry->d_inode->i_nlink <= 1) {
38688 + saved_ino = dentry->d_inode->i_ino;
38689 + saved_dev = gr_get_dev_from_dentry(dentry);
38690 + }
38691 +
38692 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38693 + error = -EACCES;
38694 + goto exit3;
38695 + }
38696 + }
38697 +
38698 error = mnt_want_write(nd.path.mnt);
38699 if (error)
38700 goto exit3;
38701 @@ -2728,6 +2834,8 @@ static long do_rmdir(int dfd, const char
38702 if (error)
38703 goto exit4;
38704 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38705 + if (!error && (saved_dev || saved_ino))
38706 + gr_handle_delete(saved_ino, saved_dev);
38707 exit4:
38708 mnt_drop_write(nd.path.mnt);
38709 exit3:
38710 @@ -2790,6 +2898,8 @@ static long do_unlinkat(int dfd, const c
38711 struct dentry *dentry;
38712 struct nameidata nd;
38713 struct inode *inode = NULL;
38714 + ino_t saved_ino = 0;
38715 + dev_t saved_dev = 0;
38716
38717 error = user_path_parent(dfd, pathname, &nd, &name);
38718 if (error)
38719 @@ -2809,8 +2919,17 @@ static long do_unlinkat(int dfd, const c
38720 if (nd.last.name[nd.last.len])
38721 goto slashes;
38722 inode = dentry->d_inode;
38723 - if (inode)
38724 + if (inode) {
38725 ihold(inode);
38726 + if (inode->i_nlink <= 1) {
38727 + saved_ino = inode->i_ino;
38728 + saved_dev = gr_get_dev_from_dentry(dentry);
38729 + }
38730 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38731 + error = -EACCES;
38732 + goto exit2;
38733 + }
38734 + }
38735 error = mnt_want_write(nd.path.mnt);
38736 if (error)
38737 goto exit2;
38738 @@ -2818,6 +2937,8 @@ static long do_unlinkat(int dfd, const c
38739 if (error)
38740 goto exit3;
38741 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38742 + if (!error && (saved_ino || saved_dev))
38743 + gr_handle_delete(saved_ino, saved_dev);
38744 exit3:
38745 mnt_drop_write(nd.path.mnt);
38746 exit2:
38747 @@ -2895,6 +3016,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38748 if (IS_ERR(dentry))
38749 goto out_unlock;
38750
38751 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38752 + error = -EACCES;
38753 + goto out_dput;
38754 + }
38755 +
38756 error = mnt_want_write(nd.path.mnt);
38757 if (error)
38758 goto out_dput;
38759 @@ -2902,6 +3028,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38760 if (error)
38761 goto out_drop_write;
38762 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38763 + if (!error)
38764 + gr_handle_create(dentry, nd.path.mnt);
38765 out_drop_write:
38766 mnt_drop_write(nd.path.mnt);
38767 out_dput:
38768 @@ -3010,6 +3138,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38769 error = PTR_ERR(new_dentry);
38770 if (IS_ERR(new_dentry))
38771 goto out_unlock;
38772 +
38773 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38774 + old_path.dentry->d_inode,
38775 + old_path.dentry->d_inode->i_mode, to)) {
38776 + error = -EACCES;
38777 + goto out_dput;
38778 + }
38779 +
38780 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38781 + old_path.dentry, old_path.mnt, to)) {
38782 + error = -EACCES;
38783 + goto out_dput;
38784 + }
38785 +
38786 error = mnt_want_write(nd.path.mnt);
38787 if (error)
38788 goto out_dput;
38789 @@ -3017,6 +3159,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38790 if (error)
38791 goto out_drop_write;
38792 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38793 + if (!error)
38794 + gr_handle_create(new_dentry, nd.path.mnt);
38795 out_drop_write:
38796 mnt_drop_write(nd.path.mnt);
38797 out_dput:
38798 @@ -3194,6 +3338,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38799 char *to;
38800 int error;
38801
38802 + pax_track_stack();
38803 +
38804 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38805 if (error)
38806 goto exit;
38807 @@ -3250,6 +3396,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38808 if (new_dentry == trap)
38809 goto exit5;
38810
38811 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38812 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38813 + to);
38814 + if (error)
38815 + goto exit5;
38816 +
38817 error = mnt_want_write(oldnd.path.mnt);
38818 if (error)
38819 goto exit5;
38820 @@ -3259,6 +3411,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38821 goto exit6;
38822 error = vfs_rename(old_dir->d_inode, old_dentry,
38823 new_dir->d_inode, new_dentry);
38824 + if (!error)
38825 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38826 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38827 exit6:
38828 mnt_drop_write(oldnd.path.mnt);
38829 exit5:
38830 @@ -3284,6 +3439,8 @@ SYSCALL_DEFINE2(rename, const char __use
38831
38832 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38833 {
38834 + char tmpbuf[64];
38835 + const char *newlink;
38836 int len;
38837
38838 len = PTR_ERR(link);
38839 @@ -3293,7 +3450,14 @@ int vfs_readlink(struct dentry *dentry,
38840 len = strlen(link);
38841 if (len > (unsigned) buflen)
38842 len = buflen;
38843 - if (copy_to_user(buffer, link, len))
38844 +
38845 + if (len < sizeof(tmpbuf)) {
38846 + memcpy(tmpbuf, link, len);
38847 + newlink = tmpbuf;
38848 + } else
38849 + newlink = link;
38850 +
38851 + if (copy_to_user(buffer, newlink, len))
38852 len = -EFAULT;
38853 out:
38854 return len;
38855 diff -urNp linux-2.6.39.4/fs/namespace.c linux-2.6.39.4/fs/namespace.c
38856 --- linux-2.6.39.4/fs/namespace.c 2011-05-19 00:06:34.000000000 -0400
38857 +++ linux-2.6.39.4/fs/namespace.c 2011-08-05 19:44:37.000000000 -0400
38858 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38859 if (!(sb->s_flags & MS_RDONLY))
38860 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38861 up_write(&sb->s_umount);
38862 +
38863 + gr_log_remount(mnt->mnt_devname, retval);
38864 +
38865 return retval;
38866 }
38867
38868 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38869 br_write_unlock(vfsmount_lock);
38870 up_write(&namespace_sem);
38871 release_mounts(&umount_list);
38872 +
38873 + gr_log_unmount(mnt->mnt_devname, retval);
38874 +
38875 return retval;
38876 }
38877
38878 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38879 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38880 MS_STRICTATIME);
38881
38882 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38883 + retval = -EPERM;
38884 + goto dput_out;
38885 + }
38886 +
38887 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38888 + retval = -EPERM;
38889 + goto dput_out;
38890 + }
38891 +
38892 if (flags & MS_REMOUNT)
38893 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38894 data_page);
38895 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38896 dev_name, data_page);
38897 dput_out:
38898 path_put(&path);
38899 +
38900 + gr_log_mount(dev_name, dir_name, retval);
38901 +
38902 return retval;
38903 }
38904
38905 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38906 if (error)
38907 goto out2;
38908
38909 + if (gr_handle_chroot_pivot()) {
38910 + error = -EPERM;
38911 + goto out2;
38912 + }
38913 +
38914 get_fs_root(current->fs, &root);
38915 error = lock_mount(&old);
38916 if (error)
38917 diff -urNp linux-2.6.39.4/fs/ncpfs/dir.c linux-2.6.39.4/fs/ncpfs/dir.c
38918 --- linux-2.6.39.4/fs/ncpfs/dir.c 2011-05-19 00:06:34.000000000 -0400
38919 +++ linux-2.6.39.4/fs/ncpfs/dir.c 2011-08-05 19:44:37.000000000 -0400
38920 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38921 int res, val = 0, len;
38922 __u8 __name[NCP_MAXPATHLEN + 1];
38923
38924 + pax_track_stack();
38925 +
38926 if (dentry == dentry->d_sb->s_root)
38927 return 1;
38928
38929 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
38930 int error, res, len;
38931 __u8 __name[NCP_MAXPATHLEN + 1];
38932
38933 + pax_track_stack();
38934 +
38935 error = -EIO;
38936 if (!ncp_conn_valid(server))
38937 goto finished;
38938 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
38939 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
38940 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
38941
38942 + pax_track_stack();
38943 +
38944 ncp_age_dentry(server, dentry);
38945 len = sizeof(__name);
38946 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
38947 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
38948 int error, len;
38949 __u8 __name[NCP_MAXPATHLEN + 1];
38950
38951 + pax_track_stack();
38952 +
38953 DPRINTK("ncp_mkdir: making %s/%s\n",
38954 dentry->d_parent->d_name.name, dentry->d_name.name);
38955
38956 @@ -1135,6 +1143,8 @@ static int ncp_rename(struct inode *old_
38957 int old_len, new_len;
38958 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
38959
38960 + pax_track_stack();
38961 +
38962 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
38963 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
38964 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
38965 diff -urNp linux-2.6.39.4/fs/ncpfs/inode.c linux-2.6.39.4/fs/ncpfs/inode.c
38966 --- linux-2.6.39.4/fs/ncpfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38967 +++ linux-2.6.39.4/fs/ncpfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38968 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
38969 #endif
38970 struct ncp_entry_info finfo;
38971
38972 + pax_track_stack();
38973 +
38974 data.wdog_pid = NULL;
38975 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
38976 if (!server)
38977 diff -urNp linux-2.6.39.4/fs/nfs/inode.c linux-2.6.39.4/fs/nfs/inode.c
38978 --- linux-2.6.39.4/fs/nfs/inode.c 2011-07-09 09:18:51.000000000 -0400
38979 +++ linux-2.6.39.4/fs/nfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38980 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
38981 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
38982 nfsi->attrtimeo_timestamp = jiffies;
38983
38984 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
38985 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
38986 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
38987 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
38988 else
38989 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
38990 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
38991 }
38992
38993 -static atomic_long_t nfs_attr_generation_counter;
38994 +static atomic_long_unchecked_t nfs_attr_generation_counter;
38995
38996 static unsigned long nfs_read_attr_generation_counter(void)
38997 {
38998 - return atomic_long_read(&nfs_attr_generation_counter);
38999 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
39000 }
39001
39002 unsigned long nfs_inc_attr_generation_counter(void)
39003 {
39004 - return atomic_long_inc_return(&nfs_attr_generation_counter);
39005 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
39006 }
39007
39008 void nfs_fattr_init(struct nfs_fattr *fattr)
39009 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4state.c linux-2.6.39.4/fs/nfsd/nfs4state.c
39010 --- linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-05-19 00:06:34.000000000 -0400
39011 +++ linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-08-05 19:44:37.000000000 -0400
39012 @@ -3784,6 +3784,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
39013 unsigned int strhashval;
39014 int err;
39015
39016 + pax_track_stack();
39017 +
39018 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
39019 (long long) lock->lk_offset,
39020 (long long) lock->lk_length);
39021 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4xdr.c linux-2.6.39.4/fs/nfsd/nfs4xdr.c
39022 --- linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-05-19 00:06:34.000000000 -0400
39023 +++ linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-08-05 19:44:37.000000000 -0400
39024 @@ -1793,6 +1793,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
39025 .dentry = dentry,
39026 };
39027
39028 + pax_track_stack();
39029 +
39030 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
39031 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
39032 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
39033 diff -urNp linux-2.6.39.4/fs/nfsd/vfs.c linux-2.6.39.4/fs/nfsd/vfs.c
39034 --- linux-2.6.39.4/fs/nfsd/vfs.c 2011-07-09 09:18:51.000000000 -0400
39035 +++ linux-2.6.39.4/fs/nfsd/vfs.c 2011-08-05 19:44:37.000000000 -0400
39036 @@ -901,7 +901,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
39037 } else {
39038 oldfs = get_fs();
39039 set_fs(KERNEL_DS);
39040 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
39041 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
39042 set_fs(oldfs);
39043 }
39044
39045 @@ -1005,7 +1005,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
39046
39047 /* Write the data. */
39048 oldfs = get_fs(); set_fs(KERNEL_DS);
39049 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
39050 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
39051 set_fs(oldfs);
39052 if (host_err < 0)
39053 goto out_nfserr;
39054 @@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39055 */
39056
39057 oldfs = get_fs(); set_fs(KERNEL_DS);
39058 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
39059 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39060 set_fs(oldfs);
39061
39062 if (host_err < 0)
39063 diff -urNp linux-2.6.39.4/fs/notify/notification.c linux-2.6.39.4/fs/notify/notification.c
39064 --- linux-2.6.39.4/fs/notify/notification.c 2011-05-19 00:06:34.000000000 -0400
39065 +++ linux-2.6.39.4/fs/notify/notification.c 2011-08-05 19:44:37.000000000 -0400
39066 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39067 * get set to 0 so it will never get 'freed'
39068 */
39069 static struct fsnotify_event *q_overflow_event;
39070 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39071 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39072
39073 /**
39074 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39075 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39076 */
39077 u32 fsnotify_get_cookie(void)
39078 {
39079 - return atomic_inc_return(&fsnotify_sync_cookie);
39080 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39081 }
39082 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39083
39084 diff -urNp linux-2.6.39.4/fs/ntfs/dir.c linux-2.6.39.4/fs/ntfs/dir.c
39085 --- linux-2.6.39.4/fs/ntfs/dir.c 2011-05-19 00:06:34.000000000 -0400
39086 +++ linux-2.6.39.4/fs/ntfs/dir.c 2011-08-05 19:44:37.000000000 -0400
39087 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
39088 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39089 ~(s64)(ndir->itype.index.block_size - 1)));
39090 /* Bounds checks. */
39091 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39092 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39093 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39094 "inode 0x%lx or driver bug.", vdir->i_ino);
39095 goto err_out;
39096 diff -urNp linux-2.6.39.4/fs/ntfs/file.c linux-2.6.39.4/fs/ntfs/file.c
39097 --- linux-2.6.39.4/fs/ntfs/file.c 2011-05-19 00:06:34.000000000 -0400
39098 +++ linux-2.6.39.4/fs/ntfs/file.c 2011-08-05 19:44:37.000000000 -0400
39099 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39100 #endif /* NTFS_RW */
39101 };
39102
39103 -const struct file_operations ntfs_empty_file_ops = {};
39104 +const struct file_operations ntfs_empty_file_ops __read_only;
39105
39106 -const struct inode_operations ntfs_empty_inode_ops = {};
39107 +const struct inode_operations ntfs_empty_inode_ops __read_only;
39108 diff -urNp linux-2.6.39.4/fs/ocfs2/localalloc.c linux-2.6.39.4/fs/ocfs2/localalloc.c
39109 --- linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-05-19 00:06:34.000000000 -0400
39110 +++ linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-08-05 19:44:37.000000000 -0400
39111 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39112 goto bail;
39113 }
39114
39115 - atomic_inc(&osb->alloc_stats.moves);
39116 + atomic_inc_unchecked(&osb->alloc_stats.moves);
39117
39118 bail:
39119 if (handle)
39120 diff -urNp linux-2.6.39.4/fs/ocfs2/namei.c linux-2.6.39.4/fs/ocfs2/namei.c
39121 --- linux-2.6.39.4/fs/ocfs2/namei.c 2011-05-19 00:06:34.000000000 -0400
39122 +++ linux-2.6.39.4/fs/ocfs2/namei.c 2011-08-05 19:44:37.000000000 -0400
39123 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39124 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39125 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39126
39127 + pax_track_stack();
39128 +
39129 /* At some point it might be nice to break this function up a
39130 * bit. */
39131
39132 diff -urNp linux-2.6.39.4/fs/ocfs2/ocfs2.h linux-2.6.39.4/fs/ocfs2/ocfs2.h
39133 --- linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-05-19 00:06:34.000000000 -0400
39134 +++ linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-08-05 19:44:37.000000000 -0400
39135 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
39136
39137 struct ocfs2_alloc_stats
39138 {
39139 - atomic_t moves;
39140 - atomic_t local_data;
39141 - atomic_t bitmap_data;
39142 - atomic_t bg_allocs;
39143 - atomic_t bg_extends;
39144 + atomic_unchecked_t moves;
39145 + atomic_unchecked_t local_data;
39146 + atomic_unchecked_t bitmap_data;
39147 + atomic_unchecked_t bg_allocs;
39148 + atomic_unchecked_t bg_extends;
39149 };
39150
39151 enum ocfs2_local_alloc_state
39152 diff -urNp linux-2.6.39.4/fs/ocfs2/suballoc.c linux-2.6.39.4/fs/ocfs2/suballoc.c
39153 --- linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-05-19 00:06:34.000000000 -0400
39154 +++ linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-08-05 19:44:37.000000000 -0400
39155 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39156 mlog_errno(status);
39157 goto bail;
39158 }
39159 - atomic_inc(&osb->alloc_stats.bg_extends);
39160 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39161
39162 /* You should never ask for this much metadata */
39163 BUG_ON(bits_wanted >
39164 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39165 mlog_errno(status);
39166 goto bail;
39167 }
39168 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39169 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39170
39171 *suballoc_loc = res.sr_bg_blkno;
39172 *suballoc_bit_start = res.sr_bit_offset;
39173 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39174 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39175 res->sr_bits);
39176
39177 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39178 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39179
39180 BUG_ON(res->sr_bits != 1);
39181
39182 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39183 mlog_errno(status);
39184 goto bail;
39185 }
39186 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39187 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39188
39189 BUG_ON(res.sr_bits != 1);
39190
39191 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39192 cluster_start,
39193 num_clusters);
39194 if (!status)
39195 - atomic_inc(&osb->alloc_stats.local_data);
39196 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
39197 } else {
39198 if (min_clusters > (osb->bitmap_cpg - 1)) {
39199 /* The only paths asking for contiguousness
39200 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39201 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39202 res.sr_bg_blkno,
39203 res.sr_bit_offset);
39204 - atomic_inc(&osb->alloc_stats.bitmap_data);
39205 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39206 *num_clusters = res.sr_bits;
39207 }
39208 }
39209 diff -urNp linux-2.6.39.4/fs/ocfs2/super.c linux-2.6.39.4/fs/ocfs2/super.c
39210 --- linux-2.6.39.4/fs/ocfs2/super.c 2011-05-19 00:06:34.000000000 -0400
39211 +++ linux-2.6.39.4/fs/ocfs2/super.c 2011-08-05 19:44:37.000000000 -0400
39212 @@ -299,11 +299,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39213 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39214 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39215 "Stats",
39216 - atomic_read(&osb->alloc_stats.bitmap_data),
39217 - atomic_read(&osb->alloc_stats.local_data),
39218 - atomic_read(&osb->alloc_stats.bg_allocs),
39219 - atomic_read(&osb->alloc_stats.moves),
39220 - atomic_read(&osb->alloc_stats.bg_extends));
39221 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39222 + atomic_read_unchecked(&osb->alloc_stats.local_data),
39223 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39224 + atomic_read_unchecked(&osb->alloc_stats.moves),
39225 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39226
39227 out += snprintf(buf + out, len - out,
39228 "%10s => State: %u Descriptor: %llu Size: %u bits "
39229 @@ -2111,11 +2111,11 @@ static int ocfs2_initialize_super(struct
39230 spin_lock_init(&osb->osb_xattr_lock);
39231 ocfs2_init_steal_slots(osb);
39232
39233 - atomic_set(&osb->alloc_stats.moves, 0);
39234 - atomic_set(&osb->alloc_stats.local_data, 0);
39235 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
39236 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
39237 - atomic_set(&osb->alloc_stats.bg_extends, 0);
39238 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39239 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39240 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39241 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39242 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39243
39244 /* Copy the blockcheck stats from the superblock probe */
39245 osb->osb_ecc_stats = *stats;
39246 diff -urNp linux-2.6.39.4/fs/ocfs2/symlink.c linux-2.6.39.4/fs/ocfs2/symlink.c
39247 --- linux-2.6.39.4/fs/ocfs2/symlink.c 2011-05-19 00:06:34.000000000 -0400
39248 +++ linux-2.6.39.4/fs/ocfs2/symlink.c 2011-08-05 19:44:37.000000000 -0400
39249 @@ -142,7 +142,7 @@ bail:
39250
39251 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39252 {
39253 - char *link = nd_get_link(nd);
39254 + const char *link = nd_get_link(nd);
39255 if (!IS_ERR(link))
39256 kfree(link);
39257 }
39258 diff -urNp linux-2.6.39.4/fs/open.c linux-2.6.39.4/fs/open.c
39259 --- linux-2.6.39.4/fs/open.c 2011-05-19 00:06:34.000000000 -0400
39260 +++ linux-2.6.39.4/fs/open.c 2011-08-05 19:44:37.000000000 -0400
39261 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39262 error = locks_verify_truncate(inode, NULL, length);
39263 if (!error)
39264 error = security_path_truncate(&path);
39265 +
39266 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39267 + error = -EACCES;
39268 +
39269 if (!error)
39270 error = do_truncate(path.dentry, length, 0, NULL);
39271
39272 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39273 if (__mnt_is_readonly(path.mnt))
39274 res = -EROFS;
39275
39276 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39277 + res = -EACCES;
39278 +
39279 out_path_release:
39280 path_put(&path);
39281 out:
39282 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39283 if (error)
39284 goto dput_and_out;
39285
39286 + gr_log_chdir(path.dentry, path.mnt);
39287 +
39288 set_fs_pwd(current->fs, &path);
39289
39290 dput_and_out:
39291 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39292 goto out_putf;
39293
39294 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39295 +
39296 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39297 + error = -EPERM;
39298 +
39299 + if (!error)
39300 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39301 +
39302 if (!error)
39303 set_fs_pwd(current->fs, &file->f_path);
39304 out_putf:
39305 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39306 if (error)
39307 goto dput_and_out;
39308
39309 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39310 + goto dput_and_out;
39311 +
39312 + if (gr_handle_chroot_caps(&path)) {
39313 + error = -ENOMEM;
39314 + goto dput_and_out;
39315 + }
39316 +
39317 set_fs_root(current->fs, &path);
39318 +
39319 + gr_handle_chroot_chdir(&path);
39320 +
39321 error = 0;
39322 dput_and_out:
39323 path_put(&path);
39324 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39325 err = mnt_want_write_file(file);
39326 if (err)
39327 goto out_putf;
39328 +
39329 mutex_lock(&inode->i_mutex);
39330 +
39331 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39332 + err = -EACCES;
39333 + goto out_unlock;
39334 + }
39335 +
39336 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39337 if (err)
39338 goto out_unlock;
39339 if (mode == (mode_t) -1)
39340 mode = inode->i_mode;
39341 +
39342 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39343 + err = -EACCES;
39344 + goto out_unlock;
39345 + }
39346 +
39347 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39348 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39349 err = notify_change(dentry, &newattrs);
39350 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39351 error = mnt_want_write(path.mnt);
39352 if (error)
39353 goto dput_and_out;
39354 +
39355 mutex_lock(&inode->i_mutex);
39356 +
39357 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39358 + error = -EACCES;
39359 + goto out_unlock;
39360 + }
39361 +
39362 error = security_path_chmod(path.dentry, path.mnt, mode);
39363 if (error)
39364 goto out_unlock;
39365 if (mode == (mode_t) -1)
39366 mode = inode->i_mode;
39367 +
39368 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39369 + error = -EACCES;
39370 + goto out_unlock;
39371 + }
39372 +
39373 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39374 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39375 error = notify_change(path.dentry, &newattrs);
39376 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39377 int error;
39378 struct iattr newattrs;
39379
39380 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39381 + return -EACCES;
39382 +
39383 newattrs.ia_valid = ATTR_CTIME;
39384 if (user != (uid_t) -1) {
39385 newattrs.ia_valid |= ATTR_UID;
39386 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39387 if (!IS_ERR(tmp)) {
39388 fd = get_unused_fd_flags(flags);
39389 if (fd >= 0) {
39390 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39391 + struct file *f;
39392 + /* don't allow to be set by userland */
39393 + flags &= ~FMODE_GREXEC;
39394 + f = do_filp_open(dfd, tmp, &op, lookup);
39395 if (IS_ERR(f)) {
39396 put_unused_fd(fd);
39397 fd = PTR_ERR(f);
39398 diff -urNp linux-2.6.39.4/fs/partitions/ldm.c linux-2.6.39.4/fs/partitions/ldm.c
39399 --- linux-2.6.39.4/fs/partitions/ldm.c 2011-06-03 00:04:14.000000000 -0400
39400 +++ linux-2.6.39.4/fs/partitions/ldm.c 2011-08-05 19:44:37.000000000 -0400
39401 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39402 ldm_error ("A VBLK claims to have %d parts.", num);
39403 return false;
39404 }
39405 +
39406 if (rec >= num) {
39407 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39408 return false;
39409 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39410 goto found;
39411 }
39412
39413 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39414 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39415 if (!f) {
39416 ldm_crit ("Out of memory.");
39417 return false;
39418 diff -urNp linux-2.6.39.4/fs/pipe.c linux-2.6.39.4/fs/pipe.c
39419 --- linux-2.6.39.4/fs/pipe.c 2011-05-19 00:06:34.000000000 -0400
39420 +++ linux-2.6.39.4/fs/pipe.c 2011-08-05 19:44:37.000000000 -0400
39421 @@ -420,9 +420,9 @@ redo:
39422 }
39423 if (bufs) /* More to do? */
39424 continue;
39425 - if (!pipe->writers)
39426 + if (!atomic_read(&pipe->writers))
39427 break;
39428 - if (!pipe->waiting_writers) {
39429 + if (!atomic_read(&pipe->waiting_writers)) {
39430 /* syscall merging: Usually we must not sleep
39431 * if O_NONBLOCK is set, or if we got some data.
39432 * But if a writer sleeps in kernel space, then
39433 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39434 mutex_lock(&inode->i_mutex);
39435 pipe = inode->i_pipe;
39436
39437 - if (!pipe->readers) {
39438 + if (!atomic_read(&pipe->readers)) {
39439 send_sig(SIGPIPE, current, 0);
39440 ret = -EPIPE;
39441 goto out;
39442 @@ -530,7 +530,7 @@ redo1:
39443 for (;;) {
39444 int bufs;
39445
39446 - if (!pipe->readers) {
39447 + if (!atomic_read(&pipe->readers)) {
39448 send_sig(SIGPIPE, current, 0);
39449 if (!ret)
39450 ret = -EPIPE;
39451 @@ -616,9 +616,9 @@ redo2:
39452 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39453 do_wakeup = 0;
39454 }
39455 - pipe->waiting_writers++;
39456 + atomic_inc(&pipe->waiting_writers);
39457 pipe_wait(pipe);
39458 - pipe->waiting_writers--;
39459 + atomic_dec(&pipe->waiting_writers);
39460 }
39461 out:
39462 mutex_unlock(&inode->i_mutex);
39463 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39464 mask = 0;
39465 if (filp->f_mode & FMODE_READ) {
39466 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39467 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39468 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39469 mask |= POLLHUP;
39470 }
39471
39472 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39473 * Most Unices do not set POLLERR for FIFOs but on Linux they
39474 * behave exactly like pipes for poll().
39475 */
39476 - if (!pipe->readers)
39477 + if (!atomic_read(&pipe->readers))
39478 mask |= POLLERR;
39479 }
39480
39481 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39482
39483 mutex_lock(&inode->i_mutex);
39484 pipe = inode->i_pipe;
39485 - pipe->readers -= decr;
39486 - pipe->writers -= decw;
39487 + atomic_sub(decr, &pipe->readers);
39488 + atomic_sub(decw, &pipe->writers);
39489
39490 - if (!pipe->readers && !pipe->writers) {
39491 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39492 free_pipe_info(inode);
39493 } else {
39494 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39495 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39496
39497 if (inode->i_pipe) {
39498 ret = 0;
39499 - inode->i_pipe->readers++;
39500 + atomic_inc(&inode->i_pipe->readers);
39501 }
39502
39503 mutex_unlock(&inode->i_mutex);
39504 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39505
39506 if (inode->i_pipe) {
39507 ret = 0;
39508 - inode->i_pipe->writers++;
39509 + atomic_inc(&inode->i_pipe->writers);
39510 }
39511
39512 mutex_unlock(&inode->i_mutex);
39513 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39514 if (inode->i_pipe) {
39515 ret = 0;
39516 if (filp->f_mode & FMODE_READ)
39517 - inode->i_pipe->readers++;
39518 + atomic_inc(&inode->i_pipe->readers);
39519 if (filp->f_mode & FMODE_WRITE)
39520 - inode->i_pipe->writers++;
39521 + atomic_inc(&inode->i_pipe->writers);
39522 }
39523
39524 mutex_unlock(&inode->i_mutex);
39525 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39526 inode->i_pipe = NULL;
39527 }
39528
39529 -static struct vfsmount *pipe_mnt __read_mostly;
39530 +struct vfsmount *pipe_mnt __read_mostly;
39531
39532 /*
39533 * pipefs_dname() is called from d_path().
39534 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39535 goto fail_iput;
39536 inode->i_pipe = pipe;
39537
39538 - pipe->readers = pipe->writers = 1;
39539 + atomic_set(&pipe->readers, 1);
39540 + atomic_set(&pipe->writers, 1);
39541 inode->i_fop = &rdwr_pipefifo_fops;
39542
39543 /*
39544 diff -urNp linux-2.6.39.4/fs/proc/array.c linux-2.6.39.4/fs/proc/array.c
39545 --- linux-2.6.39.4/fs/proc/array.c 2011-05-19 00:06:34.000000000 -0400
39546 +++ linux-2.6.39.4/fs/proc/array.c 2011-08-05 19:44:37.000000000 -0400
39547 @@ -60,6 +60,7 @@
39548 #include <linux/tty.h>
39549 #include <linux/string.h>
39550 #include <linux/mman.h>
39551 +#include <linux/grsecurity.h>
39552 #include <linux/proc_fs.h>
39553 #include <linux/ioport.h>
39554 #include <linux/uaccess.h>
39555 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39556 seq_putc(m, '\n');
39557 }
39558
39559 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39560 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39561 +{
39562 + if (p->mm)
39563 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39564 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39565 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39566 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39567 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39568 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39569 + else
39570 + seq_printf(m, "PaX:\t-----\n");
39571 +}
39572 +#endif
39573 +
39574 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39575 struct pid *pid, struct task_struct *task)
39576 {
39577 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39578 task_cpus_allowed(m, task);
39579 cpuset_task_status_allowed(m, task);
39580 task_context_switch_counts(m, task);
39581 +
39582 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39583 + task_pax(m, task);
39584 +#endif
39585 +
39586 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39587 + task_grsec_rbac(m, task);
39588 +#endif
39589 +
39590 return 0;
39591 }
39592
39593 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39594 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39595 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39596 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39597 +#endif
39598 +
39599 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39600 struct pid *pid, struct task_struct *task, int whole)
39601 {
39602 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39603 cputime_t cutime, cstime, utime, stime;
39604 cputime_t cgtime, gtime;
39605 unsigned long rsslim = 0;
39606 - char tcomm[sizeof(task->comm)];
39607 + char tcomm[sizeof(task->comm)] = { 0 };
39608 unsigned long flags;
39609
39610 + pax_track_stack();
39611 +
39612 state = *get_task_state(task);
39613 vsize = eip = esp = 0;
39614 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39615 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39616 gtime = task->gtime;
39617 }
39618
39619 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39620 + if (PAX_RAND_FLAGS(mm)) {
39621 + eip = 0;
39622 + esp = 0;
39623 + wchan = 0;
39624 + }
39625 +#endif
39626 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39627 + wchan = 0;
39628 + eip =0;
39629 + esp =0;
39630 +#endif
39631 +
39632 /* scale priority and nice values from timeslices to -20..20 */
39633 /* to make it look like a "normal" Unix priority/nice value */
39634 priority = task_prio(task);
39635 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39636 vsize,
39637 mm ? get_mm_rss(mm) : 0,
39638 rsslim,
39639 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39640 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39641 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39642 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39643 +#else
39644 mm ? (permitted ? mm->start_code : 1) : 0,
39645 mm ? (permitted ? mm->end_code : 1) : 0,
39646 (permitted && mm) ? mm->start_stack : 0,
39647 +#endif
39648 esp,
39649 eip,
39650 /* The signal information here is obsolete.
39651 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39652
39653 return 0;
39654 }
39655 +
39656 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39657 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39658 +{
39659 + u32 curr_ip = 0;
39660 + unsigned long flags;
39661 +
39662 + if (lock_task_sighand(task, &flags)) {
39663 + curr_ip = task->signal->curr_ip;
39664 + unlock_task_sighand(task, &flags);
39665 + }
39666 +
39667 + return sprintf(buffer, "%pI4\n", &curr_ip);
39668 +}
39669 +#endif
39670 diff -urNp linux-2.6.39.4/fs/proc/base.c linux-2.6.39.4/fs/proc/base.c
39671 --- linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:11:51.000000000 -0400
39672 +++ linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:13:18.000000000 -0400
39673 @@ -104,6 +104,22 @@ struct pid_entry {
39674 union proc_op op;
39675 };
39676
39677 +struct getdents_callback {
39678 + struct linux_dirent __user * current_dir;
39679 + struct linux_dirent __user * previous;
39680 + struct file * file;
39681 + int count;
39682 + int error;
39683 +};
39684 +
39685 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39686 + loff_t offset, u64 ino, unsigned int d_type)
39687 +{
39688 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39689 + buf->error = -EINVAL;
39690 + return 0;
39691 +}
39692 +
39693 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39694 .name = (NAME), \
39695 .len = sizeof(NAME) - 1, \
39696 @@ -206,6 +222,9 @@ static struct mm_struct *__check_mem_per
39697 if (task == current)
39698 return mm;
39699
39700 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39701 + return ERR_PTR(-EPERM);
39702 +
39703 /*
39704 * If current is actively ptrace'ing, and would also be
39705 * permitted to freshly attach with ptrace now, permit it.
39706 @@ -279,6 +298,9 @@ static int proc_pid_cmdline(struct task_
39707 if (!mm->arg_end)
39708 goto out_mm; /* Shh! No looking before we're done */
39709
39710 + if (gr_acl_handle_procpidmem(task))
39711 + goto out_mm;
39712 +
39713 len = mm->arg_end - mm->arg_start;
39714
39715 if (len > PAGE_SIZE)
39716 @@ -306,12 +328,28 @@ out:
39717 return res;
39718 }
39719
39720 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39721 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39722 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39723 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39724 +#endif
39725 +
39726 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39727 {
39728 struct mm_struct *mm = mm_for_maps(task);
39729 int res = PTR_ERR(mm);
39730 if (mm && !IS_ERR(mm)) {
39731 unsigned int nwords = 0;
39732 +
39733 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39734 + /* allow if we're currently ptracing this task */
39735 + if (PAX_RAND_FLAGS(mm) &&
39736 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39737 + mmput(mm);
39738 + return res;
39739 + }
39740 +#endif
39741 +
39742 do {
39743 nwords += 2;
39744 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39745 @@ -325,7 +363,7 @@ static int proc_pid_auxv(struct task_str
39746 }
39747
39748
39749 -#ifdef CONFIG_KALLSYMS
39750 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39751 /*
39752 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39753 * Returns the resolved symbol. If that fails, simply return the address.
39754 @@ -364,7 +402,7 @@ static void unlock_trace(struct task_str
39755 mutex_unlock(&task->signal->cred_guard_mutex);
39756 }
39757
39758 -#ifdef CONFIG_STACKTRACE
39759 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39760
39761 #define MAX_STACK_TRACE_DEPTH 64
39762
39763 @@ -555,7 +593,7 @@ static int proc_pid_limits(struct task_s
39764 return count;
39765 }
39766
39767 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39768 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39769 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39770 {
39771 long nr;
39772 @@ -584,7 +622,7 @@ static int proc_pid_syscall(struct task_
39773 /************************************************************************/
39774
39775 /* permission checks */
39776 -static int proc_fd_access_allowed(struct inode *inode)
39777 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39778 {
39779 struct task_struct *task;
39780 int allowed = 0;
39781 @@ -594,7 +632,10 @@ static int proc_fd_access_allowed(struct
39782 */
39783 task = get_proc_task(inode);
39784 if (task) {
39785 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39786 + if (log)
39787 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39788 + else
39789 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39790 put_task_struct(task);
39791 }
39792 return allowed;
39793 @@ -973,6 +1014,9 @@ static ssize_t environ_read(struct file
39794 if (!task)
39795 goto out_no_task;
39796
39797 + if (gr_acl_handle_procpidmem(task))
39798 + goto out;
39799 +
39800 ret = -ENOMEM;
39801 page = (char *)__get_free_page(GFP_TEMPORARY);
39802 if (!page)
39803 @@ -1660,7 +1704,7 @@ static void *proc_pid_follow_link(struct
39804 path_put(&nd->path);
39805
39806 /* Are we allowed to snoop on the tasks file descriptors? */
39807 - if (!proc_fd_access_allowed(inode))
39808 + if (!proc_fd_access_allowed(inode,0))
39809 goto out;
39810
39811 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39812 @@ -1699,8 +1743,18 @@ static int proc_pid_readlink(struct dent
39813 struct path path;
39814
39815 /* Are we allowed to snoop on the tasks file descriptors? */
39816 - if (!proc_fd_access_allowed(inode))
39817 - goto out;
39818 + /* logging this is needed for learning on chromium to work properly,
39819 + but we don't want to flood the logs from 'ps' which does a readlink
39820 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39821 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39822 + */
39823 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39824 + if (!proc_fd_access_allowed(inode,0))
39825 + goto out;
39826 + } else {
39827 + if (!proc_fd_access_allowed(inode,1))
39828 + goto out;
39829 + }
39830
39831 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39832 if (error)
39833 @@ -1766,7 +1820,11 @@ static struct inode *proc_pid_make_inode
39834 rcu_read_lock();
39835 cred = __task_cred(task);
39836 inode->i_uid = cred->euid;
39837 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39838 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39839 +#else
39840 inode->i_gid = cred->egid;
39841 +#endif
39842 rcu_read_unlock();
39843 }
39844 security_task_to_inode(task, inode);
39845 @@ -1784,6 +1842,9 @@ static int pid_getattr(struct vfsmount *
39846 struct inode *inode = dentry->d_inode;
39847 struct task_struct *task;
39848 const struct cred *cred;
39849 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39850 + const struct cred *tmpcred = current_cred();
39851 +#endif
39852
39853 generic_fillattr(inode, stat);
39854
39855 @@ -1791,13 +1852,41 @@ static int pid_getattr(struct vfsmount *
39856 stat->uid = 0;
39857 stat->gid = 0;
39858 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39859 +
39860 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39861 + rcu_read_unlock();
39862 + return -ENOENT;
39863 + }
39864 +
39865 if (task) {
39866 + cred = __task_cred(task);
39867 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39868 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39869 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39870 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39871 +#endif
39872 + ) {
39873 +#endif
39874 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39875 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39876 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39877 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39878 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39879 +#endif
39880 task_dumpable(task)) {
39881 - cred = __task_cred(task);
39882 stat->uid = cred->euid;
39883 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39884 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39885 +#else
39886 stat->gid = cred->egid;
39887 +#endif
39888 }
39889 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39890 + } else {
39891 + rcu_read_unlock();
39892 + return -ENOENT;
39893 + }
39894 +#endif
39895 }
39896 rcu_read_unlock();
39897 return 0;
39898 @@ -1834,11 +1923,20 @@ static int pid_revalidate(struct dentry
39899
39900 if (task) {
39901 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39902 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39903 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39904 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39905 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39906 +#endif
39907 task_dumpable(task)) {
39908 rcu_read_lock();
39909 cred = __task_cred(task);
39910 inode->i_uid = cred->euid;
39911 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39912 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39913 +#else
39914 inode->i_gid = cred->egid;
39915 +#endif
39916 rcu_read_unlock();
39917 } else {
39918 inode->i_uid = 0;
39919 @@ -1959,7 +2057,8 @@ static int proc_fd_info(struct inode *in
39920 int fd = proc_fd(inode);
39921
39922 if (task) {
39923 - files = get_files_struct(task);
39924 + if (!gr_acl_handle_procpidmem(task))
39925 + files = get_files_struct(task);
39926 put_task_struct(task);
39927 }
39928 if (files) {
39929 @@ -2219,15 +2318,25 @@ static const struct file_operations proc
39930 */
39931 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
39932 {
39933 + struct task_struct *task;
39934 int rv;
39935
39936 if (flags & IPERM_FLAG_RCU)
39937 return -ECHILD;
39938 rv = generic_permission(inode, mask, flags, NULL);
39939 - if (rv == 0)
39940 - return 0;
39941 +
39942 if (task_pid(current) == proc_pid(inode))
39943 rv = 0;
39944 +
39945 + task = get_proc_task(inode);
39946 + if (task == NULL)
39947 + return rv;
39948 +
39949 + if (gr_acl_handle_procpidmem(task))
39950 + rv = -EACCES;
39951 +
39952 + put_task_struct(task);
39953 +
39954 return rv;
39955 }
39956
39957 @@ -2337,6 +2446,9 @@ static struct dentry *proc_pident_lookup
39958 if (!task)
39959 goto out_no_task;
39960
39961 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39962 + goto out;
39963 +
39964 /*
39965 * Yes, it does not scale. And it should not. Don't add
39966 * new entries into /proc/<tgid>/ without very good reasons.
39967 @@ -2381,6 +2493,9 @@ static int proc_pident_readdir(struct fi
39968 if (!task)
39969 goto out_no_task;
39970
39971 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
39972 + goto out;
39973 +
39974 ret = 0;
39975 i = filp->f_pos;
39976 switch (i) {
39977 @@ -2651,7 +2766,7 @@ static void *proc_self_follow_link(struc
39978 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
39979 void *cookie)
39980 {
39981 - char *s = nd_get_link(nd);
39982 + const char *s = nd_get_link(nd);
39983 if (!IS_ERR(s))
39984 __putname(s);
39985 }
39986 @@ -2838,7 +2953,7 @@ static const struct pid_entry tgid_base_
39987 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
39988 #endif
39989 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
39990 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39991 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39992 INF("syscall", S_IRUGO, proc_pid_syscall),
39993 #endif
39994 INF("cmdline", S_IRUGO, proc_pid_cmdline),
39995 @@ -2863,10 +2978,10 @@ static const struct pid_entry tgid_base_
39996 #ifdef CONFIG_SECURITY
39997 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
39998 #endif
39999 -#ifdef CONFIG_KALLSYMS
40000 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40001 INF("wchan", S_IRUGO, proc_pid_wchan),
40002 #endif
40003 -#ifdef CONFIG_STACKTRACE
40004 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40005 ONE("stack", S_IRUGO, proc_pid_stack),
40006 #endif
40007 #ifdef CONFIG_SCHEDSTATS
40008 @@ -2897,6 +3012,9 @@ static const struct pid_entry tgid_base_
40009 #ifdef CONFIG_TASK_IO_ACCOUNTING
40010 INF("io", S_IRUSR, proc_tgid_io_accounting),
40011 #endif
40012 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40013 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
40014 +#endif
40015 };
40016
40017 static int proc_tgid_base_readdir(struct file * filp,
40018 @@ -3022,7 +3140,14 @@ static struct dentry *proc_pid_instantia
40019 if (!inode)
40020 goto out;
40021
40022 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40023 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
40024 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40025 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40026 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
40027 +#else
40028 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
40029 +#endif
40030 inode->i_op = &proc_tgid_base_inode_operations;
40031 inode->i_fop = &proc_tgid_base_operations;
40032 inode->i_flags|=S_IMMUTABLE;
40033 @@ -3064,7 +3189,11 @@ struct dentry *proc_pid_lookup(struct in
40034 if (!task)
40035 goto out;
40036
40037 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40038 + goto out_put_task;
40039 +
40040 result = proc_pid_instantiate(dir, dentry, task, NULL);
40041 +out_put_task:
40042 put_task_struct(task);
40043 out:
40044 return result;
40045 @@ -3129,6 +3258,11 @@ int proc_pid_readdir(struct file * filp,
40046 {
40047 unsigned int nr;
40048 struct task_struct *reaper;
40049 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40050 + const struct cred *tmpcred = current_cred();
40051 + const struct cred *itercred;
40052 +#endif
40053 + filldir_t __filldir = filldir;
40054 struct tgid_iter iter;
40055 struct pid_namespace *ns;
40056
40057 @@ -3152,8 +3286,27 @@ int proc_pid_readdir(struct file * filp,
40058 for (iter = next_tgid(ns, iter);
40059 iter.task;
40060 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40061 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40062 + rcu_read_lock();
40063 + itercred = __task_cred(iter.task);
40064 +#endif
40065 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40066 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40067 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40068 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40069 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40070 +#endif
40071 + )
40072 +#endif
40073 + )
40074 + __filldir = &gr_fake_filldir;
40075 + else
40076 + __filldir = filldir;
40077 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40078 + rcu_read_unlock();
40079 +#endif
40080 filp->f_pos = iter.tgid + TGID_OFFSET;
40081 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40082 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40083 put_task_struct(iter.task);
40084 goto out;
40085 }
40086 @@ -3180,7 +3333,7 @@ static const struct pid_entry tid_base_s
40087 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40088 #endif
40089 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40090 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40091 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40092 INF("syscall", S_IRUGO, proc_pid_syscall),
40093 #endif
40094 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40095 @@ -3204,10 +3357,10 @@ static const struct pid_entry tid_base_s
40096 #ifdef CONFIG_SECURITY
40097 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40098 #endif
40099 -#ifdef CONFIG_KALLSYMS
40100 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40101 INF("wchan", S_IRUGO, proc_pid_wchan),
40102 #endif
40103 -#ifdef CONFIG_STACKTRACE
40104 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40105 ONE("stack", S_IRUGO, proc_pid_stack),
40106 #endif
40107 #ifdef CONFIG_SCHEDSTATS
40108 diff -urNp linux-2.6.39.4/fs/proc/cmdline.c linux-2.6.39.4/fs/proc/cmdline.c
40109 --- linux-2.6.39.4/fs/proc/cmdline.c 2011-05-19 00:06:34.000000000 -0400
40110 +++ linux-2.6.39.4/fs/proc/cmdline.c 2011-08-05 19:44:37.000000000 -0400
40111 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
40112
40113 static int __init proc_cmdline_init(void)
40114 {
40115 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40116 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40117 +#else
40118 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40119 +#endif
40120 return 0;
40121 }
40122 module_init(proc_cmdline_init);
40123 diff -urNp linux-2.6.39.4/fs/proc/devices.c linux-2.6.39.4/fs/proc/devices.c
40124 --- linux-2.6.39.4/fs/proc/devices.c 2011-05-19 00:06:34.000000000 -0400
40125 +++ linux-2.6.39.4/fs/proc/devices.c 2011-08-05 19:44:37.000000000 -0400
40126 @@ -64,7 +64,11 @@ static const struct file_operations proc
40127
40128 static int __init proc_devices_init(void)
40129 {
40130 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40131 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40132 +#else
40133 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40134 +#endif
40135 return 0;
40136 }
40137 module_init(proc_devices_init);
40138 diff -urNp linux-2.6.39.4/fs/proc/inode.c linux-2.6.39.4/fs/proc/inode.c
40139 --- linux-2.6.39.4/fs/proc/inode.c 2011-05-19 00:06:34.000000000 -0400
40140 +++ linux-2.6.39.4/fs/proc/inode.c 2011-08-05 19:44:37.000000000 -0400
40141 @@ -433,7 +433,11 @@ struct inode *proc_get_inode(struct supe
40142 if (de->mode) {
40143 inode->i_mode = de->mode;
40144 inode->i_uid = de->uid;
40145 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40146 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40147 +#else
40148 inode->i_gid = de->gid;
40149 +#endif
40150 }
40151 if (de->size)
40152 inode->i_size = de->size;
40153 diff -urNp linux-2.6.39.4/fs/proc/internal.h linux-2.6.39.4/fs/proc/internal.h
40154 --- linux-2.6.39.4/fs/proc/internal.h 2011-05-19 00:06:34.000000000 -0400
40155 +++ linux-2.6.39.4/fs/proc/internal.h 2011-08-05 19:44:37.000000000 -0400
40156 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40157 struct pid *pid, struct task_struct *task);
40158 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40159 struct pid *pid, struct task_struct *task);
40160 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40161 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40162 +#endif
40163 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40164
40165 extern const struct file_operations proc_maps_operations;
40166 diff -urNp linux-2.6.39.4/fs/proc/Kconfig linux-2.6.39.4/fs/proc/Kconfig
40167 --- linux-2.6.39.4/fs/proc/Kconfig 2011-05-19 00:06:34.000000000 -0400
40168 +++ linux-2.6.39.4/fs/proc/Kconfig 2011-08-05 19:44:37.000000000 -0400
40169 @@ -30,12 +30,12 @@ config PROC_FS
40170
40171 config PROC_KCORE
40172 bool "/proc/kcore support" if !ARM
40173 - depends on PROC_FS && MMU
40174 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40175
40176 config PROC_VMCORE
40177 bool "/proc/vmcore support"
40178 - depends on PROC_FS && CRASH_DUMP
40179 - default y
40180 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40181 + default n
40182 help
40183 Exports the dump image of crashed kernel in ELF format.
40184
40185 @@ -59,8 +59,8 @@ config PROC_SYSCTL
40186 limited in memory.
40187
40188 config PROC_PAGE_MONITOR
40189 - default y
40190 - depends on PROC_FS && MMU
40191 + default n
40192 + depends on PROC_FS && MMU && !GRKERNSEC
40193 bool "Enable /proc page monitoring" if EXPERT
40194 help
40195 Various /proc files exist to monitor process memory utilization:
40196 diff -urNp linux-2.6.39.4/fs/proc/kcore.c linux-2.6.39.4/fs/proc/kcore.c
40197 --- linux-2.6.39.4/fs/proc/kcore.c 2011-05-19 00:06:34.000000000 -0400
40198 +++ linux-2.6.39.4/fs/proc/kcore.c 2011-08-05 19:44:37.000000000 -0400
40199 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40200 off_t offset = 0;
40201 struct kcore_list *m;
40202
40203 + pax_track_stack();
40204 +
40205 /* setup ELF header */
40206 elf = (struct elfhdr *) bufp;
40207 bufp += sizeof(struct elfhdr);
40208 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40209 * the addresses in the elf_phdr on our list.
40210 */
40211 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40212 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40213 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40214 + if (tsz > buflen)
40215 tsz = buflen;
40216 -
40217 +
40218 while (buflen) {
40219 struct kcore_list *m;
40220
40221 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40222 kfree(elf_buf);
40223 } else {
40224 if (kern_addr_valid(start)) {
40225 - unsigned long n;
40226 + char *elf_buf;
40227 + mm_segment_t oldfs;
40228
40229 - n = copy_to_user(buffer, (char *)start, tsz);
40230 - /*
40231 - * We cannot distingush between fault on source
40232 - * and fault on destination. When this happens
40233 - * we clear too and hope it will trigger the
40234 - * EFAULT again.
40235 - */
40236 - if (n) {
40237 - if (clear_user(buffer + tsz - n,
40238 - n))
40239 + elf_buf = kmalloc(tsz, GFP_KERNEL);
40240 + if (!elf_buf)
40241 + return -ENOMEM;
40242 + oldfs = get_fs();
40243 + set_fs(KERNEL_DS);
40244 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40245 + set_fs(oldfs);
40246 + if (copy_to_user(buffer, elf_buf, tsz)) {
40247 + kfree(elf_buf);
40248 return -EFAULT;
40249 + }
40250 }
40251 + set_fs(oldfs);
40252 + kfree(elf_buf);
40253 } else {
40254 if (clear_user(buffer, tsz))
40255 return -EFAULT;
40256 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40257
40258 static int open_kcore(struct inode *inode, struct file *filp)
40259 {
40260 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40261 + return -EPERM;
40262 +#endif
40263 if (!capable(CAP_SYS_RAWIO))
40264 return -EPERM;
40265 if (kcore_need_update)
40266 diff -urNp linux-2.6.39.4/fs/proc/meminfo.c linux-2.6.39.4/fs/proc/meminfo.c
40267 --- linux-2.6.39.4/fs/proc/meminfo.c 2011-05-19 00:06:34.000000000 -0400
40268 +++ linux-2.6.39.4/fs/proc/meminfo.c 2011-08-05 19:44:37.000000000 -0400
40269 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40270 unsigned long pages[NR_LRU_LISTS];
40271 int lru;
40272
40273 + pax_track_stack();
40274 +
40275 /*
40276 * display in kilobytes.
40277 */
40278 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40279 vmi.used >> 10,
40280 vmi.largest_chunk >> 10
40281 #ifdef CONFIG_MEMORY_FAILURE
40282 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40283 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40284 #endif
40285 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40286 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40287 diff -urNp linux-2.6.39.4/fs/proc/nommu.c linux-2.6.39.4/fs/proc/nommu.c
40288 --- linux-2.6.39.4/fs/proc/nommu.c 2011-05-19 00:06:34.000000000 -0400
40289 +++ linux-2.6.39.4/fs/proc/nommu.c 2011-08-05 19:44:37.000000000 -0400
40290 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40291 if (len < 1)
40292 len = 1;
40293 seq_printf(m, "%*c", len, ' ');
40294 - seq_path(m, &file->f_path, "");
40295 + seq_path(m, &file->f_path, "\n\\");
40296 }
40297
40298 seq_putc(m, '\n');
40299 diff -urNp linux-2.6.39.4/fs/proc/proc_net.c linux-2.6.39.4/fs/proc/proc_net.c
40300 --- linux-2.6.39.4/fs/proc/proc_net.c 2011-05-19 00:06:34.000000000 -0400
40301 +++ linux-2.6.39.4/fs/proc/proc_net.c 2011-08-05 19:44:37.000000000 -0400
40302 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40303 struct task_struct *task;
40304 struct nsproxy *ns;
40305 struct net *net = NULL;
40306 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40307 + const struct cred *cred = current_cred();
40308 +#endif
40309 +
40310 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40311 + if (cred->fsuid)
40312 + return net;
40313 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40314 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40315 + return net;
40316 +#endif
40317
40318 rcu_read_lock();
40319 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40320 diff -urNp linux-2.6.39.4/fs/proc/proc_sysctl.c linux-2.6.39.4/fs/proc/proc_sysctl.c
40321 --- linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-05-19 00:06:34.000000000 -0400
40322 +++ linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-08-05 19:44:37.000000000 -0400
40323 @@ -8,6 +8,8 @@
40324 #include <linux/namei.h>
40325 #include "internal.h"
40326
40327 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40328 +
40329 static const struct dentry_operations proc_sys_dentry_operations;
40330 static const struct file_operations proc_sys_file_operations;
40331 static const struct inode_operations proc_sys_inode_operations;
40332 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40333 if (!p)
40334 goto out;
40335
40336 + if (gr_handle_sysctl(p, MAY_EXEC))
40337 + goto out;
40338 +
40339 err = ERR_PTR(-ENOMEM);
40340 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40341 if (h)
40342 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40343 if (*pos < file->f_pos)
40344 continue;
40345
40346 + if (gr_handle_sysctl(table, 0))
40347 + continue;
40348 +
40349 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40350 if (res)
40351 return res;
40352 @@ -358,6 +366,9 @@ static int proc_sys_getattr(struct vfsmo
40353 if (IS_ERR(head))
40354 return PTR_ERR(head);
40355
40356 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40357 + return -ENOENT;
40358 +
40359 generic_fillattr(inode, stat);
40360 if (table)
40361 stat->mode = (stat->mode & S_IFMT) | table->mode;
40362 diff -urNp linux-2.6.39.4/fs/proc/root.c linux-2.6.39.4/fs/proc/root.c
40363 --- linux-2.6.39.4/fs/proc/root.c 2011-05-19 00:06:34.000000000 -0400
40364 +++ linux-2.6.39.4/fs/proc/root.c 2011-08-05 19:44:37.000000000 -0400
40365 @@ -122,7 +122,15 @@ void __init proc_root_init(void)
40366 #ifdef CONFIG_PROC_DEVICETREE
40367 proc_device_tree_init();
40368 #endif
40369 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40370 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40371 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40372 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40373 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40374 +#endif
40375 +#else
40376 proc_mkdir("bus", NULL);
40377 +#endif
40378 proc_sys_init();
40379 }
40380
40381 diff -urNp linux-2.6.39.4/fs/proc/task_mmu.c linux-2.6.39.4/fs/proc/task_mmu.c
40382 --- linux-2.6.39.4/fs/proc/task_mmu.c 2011-05-19 00:06:34.000000000 -0400
40383 +++ linux-2.6.39.4/fs/proc/task_mmu.c 2011-08-05 19:44:37.000000000 -0400
40384 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40385 "VmExe:\t%8lu kB\n"
40386 "VmLib:\t%8lu kB\n"
40387 "VmPTE:\t%8lu kB\n"
40388 - "VmSwap:\t%8lu kB\n",
40389 - hiwater_vm << (PAGE_SHIFT-10),
40390 + "VmSwap:\t%8lu kB\n"
40391 +
40392 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40393 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40394 +#endif
40395 +
40396 + ,hiwater_vm << (PAGE_SHIFT-10),
40397 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40398 mm->locked_vm << (PAGE_SHIFT-10),
40399 hiwater_rss << (PAGE_SHIFT-10),
40400 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40401 data << (PAGE_SHIFT-10),
40402 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40403 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40404 - swap << (PAGE_SHIFT-10));
40405 + swap << (PAGE_SHIFT-10)
40406 +
40407 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40408 + , mm->context.user_cs_base, mm->context.user_cs_limit
40409 +#endif
40410 +
40411 + );
40412 }
40413
40414 unsigned long task_vsize(struct mm_struct *mm)
40415 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40416 return ret;
40417 }
40418
40419 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40420 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40421 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40422 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40423 +#endif
40424 +
40425 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40426 {
40427 struct mm_struct *mm = vma->vm_mm;
40428 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40429 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40430 }
40431
40432 - /* We don't show the stack guard page in /proc/maps */
40433 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40434 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40435 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40436 +#else
40437 start = vma->vm_start;
40438 - if (stack_guard_page_start(vma, start))
40439 - start += PAGE_SIZE;
40440 end = vma->vm_end;
40441 - if (stack_guard_page_end(vma, end))
40442 - end -= PAGE_SIZE;
40443 +#endif
40444
40445 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40446 start,
40447 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40448 flags & VM_WRITE ? 'w' : '-',
40449 flags & VM_EXEC ? 'x' : '-',
40450 flags & VM_MAYSHARE ? 's' : 'p',
40451 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40452 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40453 +#else
40454 pgoff,
40455 +#endif
40456 MAJOR(dev), MINOR(dev), ino, &len);
40457
40458 /*
40459 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40460 */
40461 if (file) {
40462 pad_len_spaces(m, len);
40463 - seq_path(m, &file->f_path, "\n");
40464 + seq_path(m, &file->f_path, "\n\\");
40465 } else {
40466 const char *name = arch_vma_name(vma);
40467 if (!name) {
40468 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40469 if (vma->vm_start <= mm->brk &&
40470 vma->vm_end >= mm->start_brk) {
40471 name = "[heap]";
40472 - } else if (vma->vm_start <= mm->start_stack &&
40473 - vma->vm_end >= mm->start_stack) {
40474 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40475 + (vma->vm_start <= mm->start_stack &&
40476 + vma->vm_end >= mm->start_stack)) {
40477 name = "[stack]";
40478 }
40479 } else {
40480 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40481 };
40482
40483 memset(&mss, 0, sizeof mss);
40484 - mss.vma = vma;
40485 - /* mmap_sem is held in m_start */
40486 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40487 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40488 -
40489 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40490 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40491 +#endif
40492 + mss.vma = vma;
40493 + /* mmap_sem is held in m_start */
40494 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40495 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40496 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40497 + }
40498 +#endif
40499 show_map_vma(m, vma);
40500
40501 seq_printf(m,
40502 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40503 "KernelPageSize: %8lu kB\n"
40504 "MMUPageSize: %8lu kB\n"
40505 "Locked: %8lu kB\n",
40506 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40507 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40508 +#else
40509 (vma->vm_end - vma->vm_start) >> 10,
40510 +#endif
40511 mss.resident >> 10,
40512 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40513 mss.shared_clean >> 10,
40514 diff -urNp linux-2.6.39.4/fs/proc/task_nommu.c linux-2.6.39.4/fs/proc/task_nommu.c
40515 --- linux-2.6.39.4/fs/proc/task_nommu.c 2011-05-19 00:06:34.000000000 -0400
40516 +++ linux-2.6.39.4/fs/proc/task_nommu.c 2011-08-05 19:44:37.000000000 -0400
40517 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40518 else
40519 bytes += kobjsize(mm);
40520
40521 - if (current->fs && current->fs->users > 1)
40522 + if (current->fs && atomic_read(&current->fs->users) > 1)
40523 sbytes += kobjsize(current->fs);
40524 else
40525 bytes += kobjsize(current->fs);
40526 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40527
40528 if (file) {
40529 pad_len_spaces(m, len);
40530 - seq_path(m, &file->f_path, "");
40531 + seq_path(m, &file->f_path, "\n\\");
40532 } else if (mm) {
40533 if (vma->vm_start <= mm->start_stack &&
40534 vma->vm_end >= mm->start_stack) {
40535 diff -urNp linux-2.6.39.4/fs/quota/netlink.c linux-2.6.39.4/fs/quota/netlink.c
40536 --- linux-2.6.39.4/fs/quota/netlink.c 2011-05-19 00:06:34.000000000 -0400
40537 +++ linux-2.6.39.4/fs/quota/netlink.c 2011-08-05 19:44:37.000000000 -0400
40538 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40539 void quota_send_warning(short type, unsigned int id, dev_t dev,
40540 const char warntype)
40541 {
40542 - static atomic_t seq;
40543 + static atomic_unchecked_t seq;
40544 struct sk_buff *skb;
40545 void *msg_head;
40546 int ret;
40547 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40548 "VFS: Not enough memory to send quota warning.\n");
40549 return;
40550 }
40551 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40552 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40553 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40554 if (!msg_head) {
40555 printk(KERN_ERR
40556 diff -urNp linux-2.6.39.4/fs/readdir.c linux-2.6.39.4/fs/readdir.c
40557 --- linux-2.6.39.4/fs/readdir.c 2011-05-19 00:06:34.000000000 -0400
40558 +++ linux-2.6.39.4/fs/readdir.c 2011-08-05 19:44:37.000000000 -0400
40559 @@ -17,6 +17,7 @@
40560 #include <linux/security.h>
40561 #include <linux/syscalls.h>
40562 #include <linux/unistd.h>
40563 +#include <linux/namei.h>
40564
40565 #include <asm/uaccess.h>
40566
40567 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40568
40569 struct readdir_callback {
40570 struct old_linux_dirent __user * dirent;
40571 + struct file * file;
40572 int result;
40573 };
40574
40575 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40576 buf->result = -EOVERFLOW;
40577 return -EOVERFLOW;
40578 }
40579 +
40580 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40581 + return 0;
40582 +
40583 buf->result++;
40584 dirent = buf->dirent;
40585 if (!access_ok(VERIFY_WRITE, dirent,
40586 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40587
40588 buf.result = 0;
40589 buf.dirent = dirent;
40590 + buf.file = file;
40591
40592 error = vfs_readdir(file, fillonedir, &buf);
40593 if (buf.result)
40594 @@ -142,6 +149,7 @@ struct linux_dirent {
40595 struct getdents_callback {
40596 struct linux_dirent __user * current_dir;
40597 struct linux_dirent __user * previous;
40598 + struct file * file;
40599 int count;
40600 int error;
40601 };
40602 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40603 buf->error = -EOVERFLOW;
40604 return -EOVERFLOW;
40605 }
40606 +
40607 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40608 + return 0;
40609 +
40610 dirent = buf->previous;
40611 if (dirent) {
40612 if (__put_user(offset, &dirent->d_off))
40613 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40614 buf.previous = NULL;
40615 buf.count = count;
40616 buf.error = 0;
40617 + buf.file = file;
40618
40619 error = vfs_readdir(file, filldir, &buf);
40620 if (error >= 0)
40621 @@ -229,6 +242,7 @@ out:
40622 struct getdents_callback64 {
40623 struct linux_dirent64 __user * current_dir;
40624 struct linux_dirent64 __user * previous;
40625 + struct file *file;
40626 int count;
40627 int error;
40628 };
40629 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40630 buf->error = -EINVAL; /* only used if we fail.. */
40631 if (reclen > buf->count)
40632 return -EINVAL;
40633 +
40634 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40635 + return 0;
40636 +
40637 dirent = buf->previous;
40638 if (dirent) {
40639 if (__put_user(offset, &dirent->d_off))
40640 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40641
40642 buf.current_dir = dirent;
40643 buf.previous = NULL;
40644 + buf.file = file;
40645 buf.count = count;
40646 buf.error = 0;
40647
40648 diff -urNp linux-2.6.39.4/fs/reiserfs/dir.c linux-2.6.39.4/fs/reiserfs/dir.c
40649 --- linux-2.6.39.4/fs/reiserfs/dir.c 2011-05-19 00:06:34.000000000 -0400
40650 +++ linux-2.6.39.4/fs/reiserfs/dir.c 2011-08-05 19:44:37.000000000 -0400
40651 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40652 struct reiserfs_dir_entry de;
40653 int ret = 0;
40654
40655 + pax_track_stack();
40656 +
40657 reiserfs_write_lock(inode->i_sb);
40658
40659 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40660 diff -urNp linux-2.6.39.4/fs/reiserfs/do_balan.c linux-2.6.39.4/fs/reiserfs/do_balan.c
40661 --- linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-05-19 00:06:34.000000000 -0400
40662 +++ linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-08-05 19:44:37.000000000 -0400
40663 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40664 return;
40665 }
40666
40667 - atomic_inc(&(fs_generation(tb->tb_sb)));
40668 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40669 do_balance_starts(tb);
40670
40671 /* balance leaf returns 0 except if combining L R and S into
40672 diff -urNp linux-2.6.39.4/fs/reiserfs/journal.c linux-2.6.39.4/fs/reiserfs/journal.c
40673 --- linux-2.6.39.4/fs/reiserfs/journal.c 2011-05-19 00:06:34.000000000 -0400
40674 +++ linux-2.6.39.4/fs/reiserfs/journal.c 2011-08-05 19:44:37.000000000 -0400
40675 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40676 struct buffer_head *bh;
40677 int i, j;
40678
40679 + pax_track_stack();
40680 +
40681 bh = __getblk(dev, block, bufsize);
40682 if (buffer_uptodate(bh))
40683 return (bh);
40684 diff -urNp linux-2.6.39.4/fs/reiserfs/namei.c linux-2.6.39.4/fs/reiserfs/namei.c
40685 --- linux-2.6.39.4/fs/reiserfs/namei.c 2011-05-19 00:06:34.000000000 -0400
40686 +++ linux-2.6.39.4/fs/reiserfs/namei.c 2011-08-05 19:44:37.000000000 -0400
40687 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40688 unsigned long savelink = 1;
40689 struct timespec ctime;
40690
40691 + pax_track_stack();
40692 +
40693 /* three balancings: (1) old name removal, (2) new name insertion
40694 and (3) maybe "save" link insertion
40695 stat data updates: (1) old directory,
40696 diff -urNp linux-2.6.39.4/fs/reiserfs/procfs.c linux-2.6.39.4/fs/reiserfs/procfs.c
40697 --- linux-2.6.39.4/fs/reiserfs/procfs.c 2011-05-19 00:06:34.000000000 -0400
40698 +++ linux-2.6.39.4/fs/reiserfs/procfs.c 2011-08-05 19:44:37.000000000 -0400
40699 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40700 "SMALL_TAILS " : "NO_TAILS ",
40701 replay_only(sb) ? "REPLAY_ONLY " : "",
40702 convert_reiserfs(sb) ? "CONV " : "",
40703 - atomic_read(&r->s_generation_counter),
40704 + atomic_read_unchecked(&r->s_generation_counter),
40705 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40706 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40707 SF(s_good_search_by_key_reada), SF(s_bmaps),
40708 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40709 struct journal_params *jp = &rs->s_v1.s_journal;
40710 char b[BDEVNAME_SIZE];
40711
40712 + pax_track_stack();
40713 +
40714 seq_printf(m, /* on-disk fields */
40715 "jp_journal_1st_block: \t%i\n"
40716 "jp_journal_dev: \t%s[%x]\n"
40717 diff -urNp linux-2.6.39.4/fs/reiserfs/stree.c linux-2.6.39.4/fs/reiserfs/stree.c
40718 --- linux-2.6.39.4/fs/reiserfs/stree.c 2011-05-19 00:06:34.000000000 -0400
40719 +++ linux-2.6.39.4/fs/reiserfs/stree.c 2011-08-05 19:44:37.000000000 -0400
40720 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40721 int iter = 0;
40722 #endif
40723
40724 + pax_track_stack();
40725 +
40726 BUG_ON(!th->t_trans_id);
40727
40728 init_tb_struct(th, &s_del_balance, sb, path,
40729 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40730 int retval;
40731 int quota_cut_bytes = 0;
40732
40733 + pax_track_stack();
40734 +
40735 BUG_ON(!th->t_trans_id);
40736
40737 le_key2cpu_key(&cpu_key, key);
40738 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40739 int quota_cut_bytes;
40740 loff_t tail_pos = 0;
40741
40742 + pax_track_stack();
40743 +
40744 BUG_ON(!th->t_trans_id);
40745
40746 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40747 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40748 int retval;
40749 int fs_gen;
40750
40751 + pax_track_stack();
40752 +
40753 BUG_ON(!th->t_trans_id);
40754
40755 fs_gen = get_generation(inode->i_sb);
40756 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40757 int fs_gen = 0;
40758 int quota_bytes = 0;
40759
40760 + pax_track_stack();
40761 +
40762 BUG_ON(!th->t_trans_id);
40763
40764 if (inode) { /* Do we count quotas for item? */
40765 diff -urNp linux-2.6.39.4/fs/reiserfs/super.c linux-2.6.39.4/fs/reiserfs/super.c
40766 --- linux-2.6.39.4/fs/reiserfs/super.c 2011-05-19 00:06:34.000000000 -0400
40767 +++ linux-2.6.39.4/fs/reiserfs/super.c 2011-08-05 19:44:37.000000000 -0400
40768 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40769 {.option_name = NULL}
40770 };
40771
40772 + pax_track_stack();
40773 +
40774 *blocks = 0;
40775 if (!options || !*options)
40776 /* use default configuration: create tails, journaling on, no
40777 diff -urNp linux-2.6.39.4/fs/select.c linux-2.6.39.4/fs/select.c
40778 --- linux-2.6.39.4/fs/select.c 2011-05-19 00:06:34.000000000 -0400
40779 +++ linux-2.6.39.4/fs/select.c 2011-08-05 19:44:37.000000000 -0400
40780 @@ -20,6 +20,7 @@
40781 #include <linux/module.h>
40782 #include <linux/slab.h>
40783 #include <linux/poll.h>
40784 +#include <linux/security.h>
40785 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40786 #include <linux/file.h>
40787 #include <linux/fdtable.h>
40788 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40789 int retval, i, timed_out = 0;
40790 unsigned long slack = 0;
40791
40792 + pax_track_stack();
40793 +
40794 rcu_read_lock();
40795 retval = max_select_fd(n, fds);
40796 rcu_read_unlock();
40797 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40798 /* Allocate small arguments on the stack to save memory and be faster */
40799 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40800
40801 + pax_track_stack();
40802 +
40803 ret = -EINVAL;
40804 if (n < 0)
40805 goto out_nofds;
40806 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40807 struct poll_list *walk = head;
40808 unsigned long todo = nfds;
40809
40810 + pax_track_stack();
40811 +
40812 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40813 if (nfds > rlimit(RLIMIT_NOFILE))
40814 return -EINVAL;
40815
40816 diff -urNp linux-2.6.39.4/fs/seq_file.c linux-2.6.39.4/fs/seq_file.c
40817 --- linux-2.6.39.4/fs/seq_file.c 2011-05-19 00:06:34.000000000 -0400
40818 +++ linux-2.6.39.4/fs/seq_file.c 2011-08-05 20:34:06.000000000 -0400
40819 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40820 return 0;
40821 }
40822 if (!m->buf) {
40823 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40824 + m->size = PAGE_SIZE;
40825 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40826 if (!m->buf)
40827 return -ENOMEM;
40828 }
40829 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40830 Eoverflow:
40831 m->op->stop(m, p);
40832 kfree(m->buf);
40833 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40834 + m->size <<= 1;
40835 + m->buf = kmalloc(m->size, GFP_KERNEL);
40836 return !m->buf ? -ENOMEM : -EAGAIN;
40837 }
40838
40839 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40840 m->version = file->f_version;
40841 /* grab buffer if we didn't have one */
40842 if (!m->buf) {
40843 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40844 + m->size = PAGE_SIZE;
40845 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40846 if (!m->buf)
40847 goto Enomem;
40848 }
40849 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40850 goto Fill;
40851 m->op->stop(m, p);
40852 kfree(m->buf);
40853 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40854 + m->size <<= 1;
40855 + m->buf = kmalloc(m->size, GFP_KERNEL);
40856 if (!m->buf)
40857 goto Enomem;
40858 m->count = 0;
40859 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40860 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40861 void *data)
40862 {
40863 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40864 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40865 int res = -ENOMEM;
40866
40867 if (op) {
40868 diff -urNp linux-2.6.39.4/fs/splice.c linux-2.6.39.4/fs/splice.c
40869 --- linux-2.6.39.4/fs/splice.c 2011-05-19 00:06:34.000000000 -0400
40870 +++ linux-2.6.39.4/fs/splice.c 2011-08-05 19:44:37.000000000 -0400
40871 @@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40872 pipe_lock(pipe);
40873
40874 for (;;) {
40875 - if (!pipe->readers) {
40876 + if (!atomic_read(&pipe->readers)) {
40877 send_sig(SIGPIPE, current, 0);
40878 if (!ret)
40879 ret = -EPIPE;
40880 @@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40881 do_wakeup = 0;
40882 }
40883
40884 - pipe->waiting_writers++;
40885 + atomic_inc(&pipe->waiting_writers);
40886 pipe_wait(pipe);
40887 - pipe->waiting_writers--;
40888 + atomic_dec(&pipe->waiting_writers);
40889 }
40890
40891 pipe_unlock(pipe);
40892 @@ -316,6 +316,8 @@ __generic_file_splice_read(struct file *
40893 .spd_release = spd_release_page,
40894 };
40895
40896 + pax_track_stack();
40897 +
40898 if (splice_grow_spd(pipe, &spd))
40899 return -ENOMEM;
40900
40901 @@ -556,7 +558,7 @@ static ssize_t kernel_readv(struct file
40902 old_fs = get_fs();
40903 set_fs(get_ds());
40904 /* The cast to a user pointer is valid due to the set_fs() */
40905 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40906 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40907 set_fs(old_fs);
40908
40909 return res;
40910 @@ -571,7 +573,7 @@ static ssize_t kernel_write(struct file
40911 old_fs = get_fs();
40912 set_fs(get_ds());
40913 /* The cast to a user pointer is valid due to the set_fs() */
40914 - res = vfs_write(file, (const char __user *)buf, count, &pos);
40915 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
40916 set_fs(old_fs);
40917
40918 return res;
40919 @@ -599,6 +601,8 @@ ssize_t default_file_splice_read(struct
40920 .spd_release = spd_release_page,
40921 };
40922
40923 + pax_track_stack();
40924 +
40925 if (splice_grow_spd(pipe, &spd))
40926 return -ENOMEM;
40927
40928 @@ -622,7 +626,7 @@ ssize_t default_file_splice_read(struct
40929 goto err;
40930
40931 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
40932 - vec[i].iov_base = (void __user *) page_address(page);
40933 + vec[i].iov_base = (__force void __user *) page_address(page);
40934 vec[i].iov_len = this_len;
40935 spd.pages[i] = page;
40936 spd.nr_pages++;
40937 @@ -842,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
40938 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
40939 {
40940 while (!pipe->nrbufs) {
40941 - if (!pipe->writers)
40942 + if (!atomic_read(&pipe->writers))
40943 return 0;
40944
40945 - if (!pipe->waiting_writers && sd->num_spliced)
40946 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
40947 return 0;
40948
40949 if (sd->flags & SPLICE_F_NONBLOCK)
40950 @@ -1178,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct fi
40951 * out of the pipe right after the splice_to_pipe(). So set
40952 * PIPE_READERS appropriately.
40953 */
40954 - pipe->readers = 1;
40955 + atomic_set(&pipe->readers, 1);
40956
40957 current->splice_pipe = pipe;
40958 }
40959 @@ -1615,6 +1619,8 @@ static long vmsplice_to_pipe(struct file
40960 };
40961 long ret;
40962
40963 + pax_track_stack();
40964 +
40965 pipe = get_pipe_info(file);
40966 if (!pipe)
40967 return -EBADF;
40968 @@ -1730,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_
40969 ret = -ERESTARTSYS;
40970 break;
40971 }
40972 - if (!pipe->writers)
40973 + if (!atomic_read(&pipe->writers))
40974 break;
40975 - if (!pipe->waiting_writers) {
40976 + if (!atomic_read(&pipe->waiting_writers)) {
40977 if (flags & SPLICE_F_NONBLOCK) {
40978 ret = -EAGAIN;
40979 break;
40980 @@ -1764,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_
40981 pipe_lock(pipe);
40982
40983 while (pipe->nrbufs >= pipe->buffers) {
40984 - if (!pipe->readers) {
40985 + if (!atomic_read(&pipe->readers)) {
40986 send_sig(SIGPIPE, current, 0);
40987 ret = -EPIPE;
40988 break;
40989 @@ -1777,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_
40990 ret = -ERESTARTSYS;
40991 break;
40992 }
40993 - pipe->waiting_writers++;
40994 + atomic_inc(&pipe->waiting_writers);
40995 pipe_wait(pipe);
40996 - pipe->waiting_writers--;
40997 + atomic_dec(&pipe->waiting_writers);
40998 }
40999
41000 pipe_unlock(pipe);
41001 @@ -1815,14 +1821,14 @@ retry:
41002 pipe_double_lock(ipipe, opipe);
41003
41004 do {
41005 - if (!opipe->readers) {
41006 + if (!atomic_read(&opipe->readers)) {
41007 send_sig(SIGPIPE, current, 0);
41008 if (!ret)
41009 ret = -EPIPE;
41010 break;
41011 }
41012
41013 - if (!ipipe->nrbufs && !ipipe->writers)
41014 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
41015 break;
41016
41017 /*
41018 @@ -1922,7 +1928,7 @@ static int link_pipe(struct pipe_inode_i
41019 pipe_double_lock(ipipe, opipe);
41020
41021 do {
41022 - if (!opipe->readers) {
41023 + if (!atomic_read(&opipe->readers)) {
41024 send_sig(SIGPIPE, current, 0);
41025 if (!ret)
41026 ret = -EPIPE;
41027 @@ -1967,7 +1973,7 @@ static int link_pipe(struct pipe_inode_i
41028 * return EAGAIN if we have the potential of some data in the
41029 * future, otherwise just return 0
41030 */
41031 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
41032 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
41033 ret = -EAGAIN;
41034
41035 pipe_unlock(ipipe);
41036 diff -urNp linux-2.6.39.4/fs/sysfs/file.c linux-2.6.39.4/fs/sysfs/file.c
41037 --- linux-2.6.39.4/fs/sysfs/file.c 2011-05-19 00:06:34.000000000 -0400
41038 +++ linux-2.6.39.4/fs/sysfs/file.c 2011-08-05 19:44:37.000000000 -0400
41039 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41040
41041 struct sysfs_open_dirent {
41042 atomic_t refcnt;
41043 - atomic_t event;
41044 + atomic_unchecked_t event;
41045 wait_queue_head_t poll;
41046 struct list_head buffers; /* goes through sysfs_buffer.list */
41047 };
41048 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
41049 if (!sysfs_get_active(attr_sd))
41050 return -ENODEV;
41051
41052 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41053 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41054 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41055
41056 sysfs_put_active(attr_sd);
41057 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
41058 return -ENOMEM;
41059
41060 atomic_set(&new_od->refcnt, 0);
41061 - atomic_set(&new_od->event, 1);
41062 + atomic_set_unchecked(&new_od->event, 1);
41063 init_waitqueue_head(&new_od->poll);
41064 INIT_LIST_HEAD(&new_od->buffers);
41065 goto retry;
41066 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
41067
41068 sysfs_put_active(attr_sd);
41069
41070 - if (buffer->event != atomic_read(&od->event))
41071 + if (buffer->event != atomic_read_unchecked(&od->event))
41072 goto trigger;
41073
41074 return DEFAULT_POLLMASK;
41075 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
41076
41077 od = sd->s_attr.open;
41078 if (od) {
41079 - atomic_inc(&od->event);
41080 + atomic_inc_unchecked(&od->event);
41081 wake_up_interruptible(&od->poll);
41082 }
41083
41084 diff -urNp linux-2.6.39.4/fs/sysfs/mount.c linux-2.6.39.4/fs/sysfs/mount.c
41085 --- linux-2.6.39.4/fs/sysfs/mount.c 2011-05-19 00:06:34.000000000 -0400
41086 +++ linux-2.6.39.4/fs/sysfs/mount.c 2011-08-05 19:44:37.000000000 -0400
41087 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41088 .s_name = "",
41089 .s_count = ATOMIC_INIT(1),
41090 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41091 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41092 + .s_mode = S_IFDIR | S_IRWXU,
41093 +#else
41094 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41095 +#endif
41096 .s_ino = 1,
41097 };
41098
41099 diff -urNp linux-2.6.39.4/fs/sysfs/symlink.c linux-2.6.39.4/fs/sysfs/symlink.c
41100 --- linux-2.6.39.4/fs/sysfs/symlink.c 2011-05-19 00:06:34.000000000 -0400
41101 +++ linux-2.6.39.4/fs/sysfs/symlink.c 2011-08-05 19:44:37.000000000 -0400
41102 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41103
41104 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41105 {
41106 - char *page = nd_get_link(nd);
41107 + const char *page = nd_get_link(nd);
41108 if (!IS_ERR(page))
41109 free_page((unsigned long)page);
41110 }
41111 diff -urNp linux-2.6.39.4/fs/udf/inode.c linux-2.6.39.4/fs/udf/inode.c
41112 --- linux-2.6.39.4/fs/udf/inode.c 2011-05-19 00:06:34.000000000 -0400
41113 +++ linux-2.6.39.4/fs/udf/inode.c 2011-08-05 19:44:37.000000000 -0400
41114 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41115 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41116 int lastblock = 0;
41117
41118 + pax_track_stack();
41119 +
41120 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41121 prev_epos.block = iinfo->i_location;
41122 prev_epos.bh = NULL;
41123 diff -urNp linux-2.6.39.4/fs/udf/misc.c linux-2.6.39.4/fs/udf/misc.c
41124 --- linux-2.6.39.4/fs/udf/misc.c 2011-05-19 00:06:34.000000000 -0400
41125 +++ linux-2.6.39.4/fs/udf/misc.c 2011-08-05 19:44:37.000000000 -0400
41126 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41127
41128 u8 udf_tag_checksum(const struct tag *t)
41129 {
41130 - u8 *data = (u8 *)t;
41131 + const u8 *data = (const u8 *)t;
41132 u8 checksum = 0;
41133 int i;
41134 for (i = 0; i < sizeof(struct tag); ++i)
41135 diff -urNp linux-2.6.39.4/fs/utimes.c linux-2.6.39.4/fs/utimes.c
41136 --- linux-2.6.39.4/fs/utimes.c 2011-05-19 00:06:34.000000000 -0400
41137 +++ linux-2.6.39.4/fs/utimes.c 2011-08-05 19:44:37.000000000 -0400
41138 @@ -1,6 +1,7 @@
41139 #include <linux/compiler.h>
41140 #include <linux/file.h>
41141 #include <linux/fs.h>
41142 +#include <linux/security.h>
41143 #include <linux/linkage.h>
41144 #include <linux/mount.h>
41145 #include <linux/namei.h>
41146 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41147 goto mnt_drop_write_and_out;
41148 }
41149 }
41150 +
41151 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41152 + error = -EACCES;
41153 + goto mnt_drop_write_and_out;
41154 + }
41155 +
41156 mutex_lock(&inode->i_mutex);
41157 error = notify_change(path->dentry, &newattrs);
41158 mutex_unlock(&inode->i_mutex);
41159 diff -urNp linux-2.6.39.4/fs/xattr_acl.c linux-2.6.39.4/fs/xattr_acl.c
41160 --- linux-2.6.39.4/fs/xattr_acl.c 2011-05-19 00:06:34.000000000 -0400
41161 +++ linux-2.6.39.4/fs/xattr_acl.c 2011-08-05 19:44:37.000000000 -0400
41162 @@ -17,8 +17,8 @@
41163 struct posix_acl *
41164 posix_acl_from_xattr(const void *value, size_t size)
41165 {
41166 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41167 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41168 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41169 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41170 int count;
41171 struct posix_acl *acl;
41172 struct posix_acl_entry *acl_e;
41173 diff -urNp linux-2.6.39.4/fs/xattr.c linux-2.6.39.4/fs/xattr.c
41174 --- linux-2.6.39.4/fs/xattr.c 2011-05-19 00:06:34.000000000 -0400
41175 +++ linux-2.6.39.4/fs/xattr.c 2011-08-05 19:44:37.000000000 -0400
41176 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41177 * Extended attribute SET operations
41178 */
41179 static long
41180 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
41181 +setxattr(struct path *path, const char __user *name, const void __user *value,
41182 size_t size, int flags)
41183 {
41184 int error;
41185 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
41186 return PTR_ERR(kvalue);
41187 }
41188
41189 - error = vfs_setxattr(d, kname, kvalue, size, flags);
41190 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41191 + error = -EACCES;
41192 + goto out;
41193 + }
41194 +
41195 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41196 +out:
41197 kfree(kvalue);
41198 return error;
41199 }
41200 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41201 return error;
41202 error = mnt_want_write(path.mnt);
41203 if (!error) {
41204 - error = setxattr(path.dentry, name, value, size, flags);
41205 + error = setxattr(&path, name, value, size, flags);
41206 mnt_drop_write(path.mnt);
41207 }
41208 path_put(&path);
41209 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41210 return error;
41211 error = mnt_want_write(path.mnt);
41212 if (!error) {
41213 - error = setxattr(path.dentry, name, value, size, flags);
41214 + error = setxattr(&path, name, value, size, flags);
41215 mnt_drop_write(path.mnt);
41216 }
41217 path_put(&path);
41218 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41219 const void __user *,value, size_t, size, int, flags)
41220 {
41221 struct file *f;
41222 - struct dentry *dentry;
41223 int error = -EBADF;
41224
41225 f = fget(fd);
41226 if (!f)
41227 return error;
41228 - dentry = f->f_path.dentry;
41229 - audit_inode(NULL, dentry);
41230 + audit_inode(NULL, f->f_path.dentry);
41231 error = mnt_want_write_file(f);
41232 if (!error) {
41233 - error = setxattr(dentry, name, value, size, flags);
41234 + error = setxattr(&f->f_path, name, value, size, flags);
41235 mnt_drop_write(f->f_path.mnt);
41236 }
41237 fput(f);
41238 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c
41239 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-19 00:06:34.000000000 -0400
41240 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-05 19:44:37.000000000 -0400
41241 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41242 xfs_fsop_geom_t fsgeo;
41243 int error;
41244
41245 + memset(&fsgeo, 0, sizeof(fsgeo));
41246 error = xfs_fs_geometry(mp, &fsgeo, 3);
41247 if (error)
41248 return -error;
41249 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c
41250 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-19 00:06:34.000000000 -0400
41251 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-05 19:44:37.000000000 -0400
41252 @@ -128,7 +128,7 @@ xfs_find_handle(
41253 }
41254
41255 error = -EFAULT;
41256 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41257 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41258 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41259 goto out_put;
41260
41261 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c
41262 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-05-19 00:06:34.000000000 -0400
41263 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-05 19:44:37.000000000 -0400
41264 @@ -437,7 +437,7 @@ xfs_vn_put_link(
41265 struct nameidata *nd,
41266 void *p)
41267 {
41268 - char *s = nd_get_link(nd);
41269 + const char *s = nd_get_link(nd);
41270
41271 if (!IS_ERR(s))
41272 kfree(s);
41273 diff -urNp linux-2.6.39.4/fs/xfs/xfs_bmap.c linux-2.6.39.4/fs/xfs/xfs_bmap.c
41274 --- linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-05-19 00:06:34.000000000 -0400
41275 +++ linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-08-05 19:44:37.000000000 -0400
41276 @@ -287,7 +287,7 @@ xfs_bmap_validate_ret(
41277 int nmap,
41278 int ret_nmap);
41279 #else
41280 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41281 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41282 #endif /* DEBUG */
41283
41284 STATIC int
41285 diff -urNp linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c
41286 --- linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-05-19 00:06:34.000000000 -0400
41287 +++ linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-08-05 19:44:37.000000000 -0400
41288 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41289 }
41290
41291 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41292 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41293 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41294 + char name[sfep->namelen];
41295 + memcpy(name, sfep->name, sfep->namelen);
41296 + if (filldir(dirent, name, sfep->namelen,
41297 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
41298 + *offset = off & 0x7fffffff;
41299 + return 0;
41300 + }
41301 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41302 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41303 *offset = off & 0x7fffffff;
41304 return 0;
41305 diff -urNp linux-2.6.39.4/grsecurity/gracl_alloc.c linux-2.6.39.4/grsecurity/gracl_alloc.c
41306 --- linux-2.6.39.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41307 +++ linux-2.6.39.4/grsecurity/gracl_alloc.c 2011-08-05 19:44:37.000000000 -0400
41308 @@ -0,0 +1,105 @@
41309 +#include <linux/kernel.h>
41310 +#include <linux/mm.h>
41311 +#include <linux/slab.h>
41312 +#include <linux/vmalloc.h>
41313 +#include <linux/gracl.h>
41314 +#include <linux/grsecurity.h>
41315 +
41316 +static unsigned long alloc_stack_next = 1;
41317 +static unsigned long alloc_stack_size = 1;
41318 +static void **alloc_stack;
41319 +
41320 +static __inline__ int
41321 +alloc_pop(void)
41322 +{
41323 + if (alloc_stack_next == 1)
41324 + return 0;
41325 +
41326 + kfree(alloc_stack[alloc_stack_next - 2]);
41327 +
41328 + alloc_stack_next--;
41329 +
41330 + return 1;
41331 +}
41332 +
41333 +static __inline__ int
41334 +alloc_push(void *buf)
41335 +{
41336 + if (alloc_stack_next >= alloc_stack_size)
41337 + return 1;
41338 +
41339 + alloc_stack[alloc_stack_next - 1] = buf;
41340 +
41341 + alloc_stack_next++;
41342 +
41343 + return 0;
41344 +}
41345 +
41346 +void *
41347 +acl_alloc(unsigned long len)
41348 +{
41349 + void *ret = NULL;
41350 +
41351 + if (!len || len > PAGE_SIZE)
41352 + goto out;
41353 +
41354 + ret = kmalloc(len, GFP_KERNEL);
41355 +
41356 + if (ret) {
41357 + if (alloc_push(ret)) {
41358 + kfree(ret);
41359 + ret = NULL;
41360 + }
41361 + }
41362 +
41363 +out:
41364 + return ret;
41365 +}
41366 +
41367 +void *
41368 +acl_alloc_num(unsigned long num, unsigned long len)
41369 +{
41370 + if (!len || (num > (PAGE_SIZE / len)))
41371 + return NULL;
41372 +
41373 + return acl_alloc(num * len);
41374 +}
41375 +
41376 +void
41377 +acl_free_all(void)
41378 +{
41379 + if (gr_acl_is_enabled() || !alloc_stack)
41380 + return;
41381 +
41382 + while (alloc_pop()) ;
41383 +
41384 + if (alloc_stack) {
41385 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41386 + kfree(alloc_stack);
41387 + else
41388 + vfree(alloc_stack);
41389 + }
41390 +
41391 + alloc_stack = NULL;
41392 + alloc_stack_size = 1;
41393 + alloc_stack_next = 1;
41394 +
41395 + return;
41396 +}
41397 +
41398 +int
41399 +acl_alloc_stack_init(unsigned long size)
41400 +{
41401 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41402 + alloc_stack =
41403 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41404 + else
41405 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41406 +
41407 + alloc_stack_size = size;
41408 +
41409 + if (!alloc_stack)
41410 + return 0;
41411 + else
41412 + return 1;
41413 +}
41414 diff -urNp linux-2.6.39.4/grsecurity/gracl.c linux-2.6.39.4/grsecurity/gracl.c
41415 --- linux-2.6.39.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41416 +++ linux-2.6.39.4/grsecurity/gracl.c 2011-08-05 19:44:37.000000000 -0400
41417 @@ -0,0 +1,4106 @@
41418 +#include <linux/kernel.h>
41419 +#include <linux/module.h>
41420 +#include <linux/sched.h>
41421 +#include <linux/mm.h>
41422 +#include <linux/file.h>
41423 +#include <linux/fs.h>
41424 +#include <linux/namei.h>
41425 +#include <linux/mount.h>
41426 +#include <linux/tty.h>
41427 +#include <linux/proc_fs.h>
41428 +#include <linux/lglock.h>
41429 +#include <linux/slab.h>
41430 +#include <linux/vmalloc.h>
41431 +#include <linux/types.h>
41432 +#include <linux/sysctl.h>
41433 +#include <linux/netdevice.h>
41434 +#include <linux/ptrace.h>
41435 +#include <linux/gracl.h>
41436 +#include <linux/gralloc.h>
41437 +#include <linux/grsecurity.h>
41438 +#include <linux/grinternal.h>
41439 +#include <linux/pid_namespace.h>
41440 +#include <linux/fdtable.h>
41441 +#include <linux/percpu.h>
41442 +
41443 +#include <asm/uaccess.h>
41444 +#include <asm/errno.h>
41445 +#include <asm/mman.h>
41446 +
41447 +static struct acl_role_db acl_role_set;
41448 +static struct name_db name_set;
41449 +static struct inodev_db inodev_set;
41450 +
41451 +/* for keeping track of userspace pointers used for subjects, so we
41452 + can share references in the kernel as well
41453 +*/
41454 +
41455 +static struct path real_root;
41456 +
41457 +static struct acl_subj_map_db subj_map_set;
41458 +
41459 +static struct acl_role_label *default_role;
41460 +
41461 +static struct acl_role_label *role_list;
41462 +
41463 +static u16 acl_sp_role_value;
41464 +
41465 +extern char *gr_shared_page[4];
41466 +static DEFINE_MUTEX(gr_dev_mutex);
41467 +DEFINE_RWLOCK(gr_inode_lock);
41468 +
41469 +struct gr_arg *gr_usermode;
41470 +
41471 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41472 +
41473 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41474 +extern void gr_clear_learn_entries(void);
41475 +
41476 +#ifdef CONFIG_GRKERNSEC_RESLOG
41477 +extern void gr_log_resource(const struct task_struct *task,
41478 + const int res, const unsigned long wanted, const int gt);
41479 +#endif
41480 +
41481 +unsigned char *gr_system_salt;
41482 +unsigned char *gr_system_sum;
41483 +
41484 +static struct sprole_pw **acl_special_roles = NULL;
41485 +static __u16 num_sprole_pws = 0;
41486 +
41487 +static struct acl_role_label *kernel_role = NULL;
41488 +
41489 +static unsigned int gr_auth_attempts = 0;
41490 +static unsigned long gr_auth_expires = 0UL;
41491 +
41492 +#ifdef CONFIG_NET
41493 +extern struct vfsmount *sock_mnt;
41494 +#endif
41495 +
41496 +extern struct vfsmount *pipe_mnt;
41497 +extern struct vfsmount *shm_mnt;
41498 +#ifdef CONFIG_HUGETLBFS
41499 +extern struct vfsmount *hugetlbfs_vfsmount;
41500 +#endif
41501 +
41502 +static struct acl_object_label *fakefs_obj_rw;
41503 +static struct acl_object_label *fakefs_obj_rwx;
41504 +
41505 +extern int gr_init_uidset(void);
41506 +extern void gr_free_uidset(void);
41507 +extern void gr_remove_uid(uid_t uid);
41508 +extern int gr_find_uid(uid_t uid);
41509 +
41510 +DECLARE_BRLOCK(vfsmount_lock);
41511 +
41512 +__inline__ int
41513 +gr_acl_is_enabled(void)
41514 +{
41515 + return (gr_status & GR_READY);
41516 +}
41517 +
41518 +#ifdef CONFIG_BTRFS_FS
41519 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41520 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41521 +#endif
41522 +
41523 +static inline dev_t __get_dev(const struct dentry *dentry)
41524 +{
41525 +#ifdef CONFIG_BTRFS_FS
41526 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41527 + return get_btrfs_dev_from_inode(dentry->d_inode);
41528 + else
41529 +#endif
41530 + return dentry->d_inode->i_sb->s_dev;
41531 +}
41532 +
41533 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41534 +{
41535 + return __get_dev(dentry);
41536 +}
41537 +
41538 +static char gr_task_roletype_to_char(struct task_struct *task)
41539 +{
41540 + switch (task->role->roletype &
41541 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41542 + GR_ROLE_SPECIAL)) {
41543 + case GR_ROLE_DEFAULT:
41544 + return 'D';
41545 + case GR_ROLE_USER:
41546 + return 'U';
41547 + case GR_ROLE_GROUP:
41548 + return 'G';
41549 + case GR_ROLE_SPECIAL:
41550 + return 'S';
41551 + }
41552 +
41553 + return 'X';
41554 +}
41555 +
41556 +char gr_roletype_to_char(void)
41557 +{
41558 + return gr_task_roletype_to_char(current);
41559 +}
41560 +
41561 +__inline__ int
41562 +gr_acl_tpe_check(void)
41563 +{
41564 + if (unlikely(!(gr_status & GR_READY)))
41565 + return 0;
41566 + if (current->role->roletype & GR_ROLE_TPE)
41567 + return 1;
41568 + else
41569 + return 0;
41570 +}
41571 +
41572 +int
41573 +gr_handle_rawio(const struct inode *inode)
41574 +{
41575 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41576 + if (inode && S_ISBLK(inode->i_mode) &&
41577 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41578 + !capable(CAP_SYS_RAWIO))
41579 + return 1;
41580 +#endif
41581 + return 0;
41582 +}
41583 +
41584 +static int
41585 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41586 +{
41587 + if (likely(lena != lenb))
41588 + return 0;
41589 +
41590 + return !memcmp(a, b, lena);
41591 +}
41592 +
41593 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41594 +{
41595 + *buflen -= namelen;
41596 + if (*buflen < 0)
41597 + return -ENAMETOOLONG;
41598 + *buffer -= namelen;
41599 + memcpy(*buffer, str, namelen);
41600 + return 0;
41601 +}
41602 +
41603 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41604 +{
41605 + return prepend(buffer, buflen, name->name, name->len);
41606 +}
41607 +
41608 +static int prepend_path(const struct path *path, struct path *root,
41609 + char **buffer, int *buflen)
41610 +{
41611 + struct dentry *dentry = path->dentry;
41612 + struct vfsmount *vfsmnt = path->mnt;
41613 + bool slash = false;
41614 + int error = 0;
41615 +
41616 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41617 + struct dentry * parent;
41618 +
41619 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41620 + /* Global root? */
41621 + if (vfsmnt->mnt_parent == vfsmnt) {
41622 + goto out;
41623 + }
41624 + dentry = vfsmnt->mnt_mountpoint;
41625 + vfsmnt = vfsmnt->mnt_parent;
41626 + continue;
41627 + }
41628 + parent = dentry->d_parent;
41629 + prefetch(parent);
41630 + spin_lock(&dentry->d_lock);
41631 + error = prepend_name(buffer, buflen, &dentry->d_name);
41632 + spin_unlock(&dentry->d_lock);
41633 + if (!error)
41634 + error = prepend(buffer, buflen, "/", 1);
41635 + if (error)
41636 + break;
41637 +
41638 + slash = true;
41639 + dentry = parent;
41640 + }
41641 +
41642 +out:
41643 + if (!error && !slash)
41644 + error = prepend(buffer, buflen, "/", 1);
41645 +
41646 + return error;
41647 +}
41648 +
41649 +/* this must be called with vfsmount_lock and rename_lock held */
41650 +
41651 +static char *__our_d_path(const struct path *path, struct path *root,
41652 + char *buf, int buflen)
41653 +{
41654 + char *res = buf + buflen;
41655 + int error;
41656 +
41657 + prepend(&res, &buflen, "\0", 1);
41658 + error = prepend_path(path, root, &res, &buflen);
41659 + if (error)
41660 + return ERR_PTR(error);
41661 +
41662 + return res;
41663 +}
41664 +
41665 +static char *
41666 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41667 +{
41668 + char *retval;
41669 +
41670 + retval = __our_d_path(path, root, buf, buflen);
41671 + if (unlikely(IS_ERR(retval)))
41672 + retval = strcpy(buf, "<path too long>");
41673 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41674 + retval[1] = '\0';
41675 +
41676 + return retval;
41677 +}
41678 +
41679 +static char *
41680 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41681 + char *buf, int buflen)
41682 +{
41683 + struct path path;
41684 + char *res;
41685 +
41686 + path.dentry = (struct dentry *)dentry;
41687 + path.mnt = (struct vfsmount *)vfsmnt;
41688 +
41689 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41690 + by the RBAC system */
41691 + res = gen_full_path(&path, &real_root, buf, buflen);
41692 +
41693 + return res;
41694 +}
41695 +
41696 +static char *
41697 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41698 + char *buf, int buflen)
41699 +{
41700 + char *res;
41701 + struct path path;
41702 + struct path root;
41703 + struct task_struct *reaper = &init_task;
41704 +
41705 + path.dentry = (struct dentry *)dentry;
41706 + path.mnt = (struct vfsmount *)vfsmnt;
41707 +
41708 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41709 + get_fs_root(reaper->fs, &root);
41710 +
41711 + write_seqlock(&rename_lock);
41712 + br_read_lock(vfsmount_lock);
41713 + res = gen_full_path(&path, &root, buf, buflen);
41714 + br_read_unlock(vfsmount_lock);
41715 + write_sequnlock(&rename_lock);
41716 +
41717 + path_put(&root);
41718 + return res;
41719 +}
41720 +
41721 +static char *
41722 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41723 +{
41724 + char *ret;
41725 + write_seqlock(&rename_lock);
41726 + br_read_lock(vfsmount_lock);
41727 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41728 + PAGE_SIZE);
41729 + br_read_unlock(vfsmount_lock);
41730 + write_sequnlock(&rename_lock);
41731 + return ret;
41732 +}
41733 +
41734 +char *
41735 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41736 +{
41737 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41738 + PAGE_SIZE);
41739 +}
41740 +
41741 +char *
41742 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41743 +{
41744 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41745 + PAGE_SIZE);
41746 +}
41747 +
41748 +char *
41749 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41750 +{
41751 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41752 + PAGE_SIZE);
41753 +}
41754 +
41755 +char *
41756 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41757 +{
41758 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41759 + PAGE_SIZE);
41760 +}
41761 +
41762 +char *
41763 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41764 +{
41765 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41766 + PAGE_SIZE);
41767 +}
41768 +
41769 +__inline__ __u32
41770 +to_gr_audit(const __u32 reqmode)
41771 +{
41772 + /* masks off auditable permission flags, then shifts them to create
41773 + auditing flags, and adds the special case of append auditing if
41774 + we're requesting write */
41775 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41776 +}
41777 +
41778 +struct acl_subject_label *
41779 +lookup_subject_map(const struct acl_subject_label *userp)
41780 +{
41781 + unsigned int index = shash(userp, subj_map_set.s_size);
41782 + struct subject_map *match;
41783 +
41784 + match = subj_map_set.s_hash[index];
41785 +
41786 + while (match && match->user != userp)
41787 + match = match->next;
41788 +
41789 + if (match != NULL)
41790 + return match->kernel;
41791 + else
41792 + return NULL;
41793 +}
41794 +
41795 +static void
41796 +insert_subj_map_entry(struct subject_map *subjmap)
41797 +{
41798 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41799 + struct subject_map **curr;
41800 +
41801 + subjmap->prev = NULL;
41802 +
41803 + curr = &subj_map_set.s_hash[index];
41804 + if (*curr != NULL)
41805 + (*curr)->prev = subjmap;
41806 +
41807 + subjmap->next = *curr;
41808 + *curr = subjmap;
41809 +
41810 + return;
41811 +}
41812 +
41813 +static struct acl_role_label *
41814 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41815 + const gid_t gid)
41816 +{
41817 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41818 + struct acl_role_label *match;
41819 + struct role_allowed_ip *ipp;
41820 + unsigned int x;
41821 + u32 curr_ip = task->signal->curr_ip;
41822 +
41823 + task->signal->saved_ip = curr_ip;
41824 +
41825 + match = acl_role_set.r_hash[index];
41826 +
41827 + while (match) {
41828 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41829 + for (x = 0; x < match->domain_child_num; x++) {
41830 + if (match->domain_children[x] == uid)
41831 + goto found;
41832 + }
41833 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41834 + break;
41835 + match = match->next;
41836 + }
41837 +found:
41838 + if (match == NULL) {
41839 + try_group:
41840 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41841 + match = acl_role_set.r_hash[index];
41842 +
41843 + while (match) {
41844 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41845 + for (x = 0; x < match->domain_child_num; x++) {
41846 + if (match->domain_children[x] == gid)
41847 + goto found2;
41848 + }
41849 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41850 + break;
41851 + match = match->next;
41852 + }
41853 +found2:
41854 + if (match == NULL)
41855 + match = default_role;
41856 + if (match->allowed_ips == NULL)
41857 + return match;
41858 + else {
41859 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41860 + if (likely
41861 + ((ntohl(curr_ip) & ipp->netmask) ==
41862 + (ntohl(ipp->addr) & ipp->netmask)))
41863 + return match;
41864 + }
41865 + match = default_role;
41866 + }
41867 + } else if (match->allowed_ips == NULL) {
41868 + return match;
41869 + } else {
41870 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41871 + if (likely
41872 + ((ntohl(curr_ip) & ipp->netmask) ==
41873 + (ntohl(ipp->addr) & ipp->netmask)))
41874 + return match;
41875 + }
41876 + goto try_group;
41877 + }
41878 +
41879 + return match;
41880 +}
41881 +
41882 +struct acl_subject_label *
41883 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41884 + const struct acl_role_label *role)
41885 +{
41886 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41887 + struct acl_subject_label *match;
41888 +
41889 + match = role->subj_hash[index];
41890 +
41891 + while (match && (match->inode != ino || match->device != dev ||
41892 + (match->mode & GR_DELETED))) {
41893 + match = match->next;
41894 + }
41895 +
41896 + if (match && !(match->mode & GR_DELETED))
41897 + return match;
41898 + else
41899 + return NULL;
41900 +}
41901 +
41902 +struct acl_subject_label *
41903 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41904 + const struct acl_role_label *role)
41905 +{
41906 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41907 + struct acl_subject_label *match;
41908 +
41909 + match = role->subj_hash[index];
41910 +
41911 + while (match && (match->inode != ino || match->device != dev ||
41912 + !(match->mode & GR_DELETED))) {
41913 + match = match->next;
41914 + }
41915 +
41916 + if (match && (match->mode & GR_DELETED))
41917 + return match;
41918 + else
41919 + return NULL;
41920 +}
41921 +
41922 +static struct acl_object_label *
41923 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
41924 + const struct acl_subject_label *subj)
41925 +{
41926 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41927 + struct acl_object_label *match;
41928 +
41929 + match = subj->obj_hash[index];
41930 +
41931 + while (match && (match->inode != ino || match->device != dev ||
41932 + (match->mode & GR_DELETED))) {
41933 + match = match->next;
41934 + }
41935 +
41936 + if (match && !(match->mode & GR_DELETED))
41937 + return match;
41938 + else
41939 + return NULL;
41940 +}
41941 +
41942 +static struct acl_object_label *
41943 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
41944 + const struct acl_subject_label *subj)
41945 +{
41946 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
41947 + struct acl_object_label *match;
41948 +
41949 + match = subj->obj_hash[index];
41950 +
41951 + while (match && (match->inode != ino || match->device != dev ||
41952 + !(match->mode & GR_DELETED))) {
41953 + match = match->next;
41954 + }
41955 +
41956 + if (match && (match->mode & GR_DELETED))
41957 + return match;
41958 +
41959 + match = subj->obj_hash[index];
41960 +
41961 + while (match && (match->inode != ino || match->device != dev ||
41962 + (match->mode & GR_DELETED))) {
41963 + match = match->next;
41964 + }
41965 +
41966 + if (match && !(match->mode & GR_DELETED))
41967 + return match;
41968 + else
41969 + return NULL;
41970 +}
41971 +
41972 +static struct name_entry *
41973 +lookup_name_entry(const char *name)
41974 +{
41975 + unsigned int len = strlen(name);
41976 + unsigned int key = full_name_hash(name, len);
41977 + unsigned int index = key % name_set.n_size;
41978 + struct name_entry *match;
41979 +
41980 + match = name_set.n_hash[index];
41981 +
41982 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
41983 + match = match->next;
41984 +
41985 + return match;
41986 +}
41987 +
41988 +static struct name_entry *
41989 +lookup_name_entry_create(const char *name)
41990 +{
41991 + unsigned int len = strlen(name);
41992 + unsigned int key = full_name_hash(name, len);
41993 + unsigned int index = key % name_set.n_size;
41994 + struct name_entry *match;
41995 +
41996 + match = name_set.n_hash[index];
41997 +
41998 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
41999 + !match->deleted))
42000 + match = match->next;
42001 +
42002 + if (match && match->deleted)
42003 + return match;
42004 +
42005 + match = name_set.n_hash[index];
42006 +
42007 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42008 + match->deleted))
42009 + match = match->next;
42010 +
42011 + if (match && !match->deleted)
42012 + return match;
42013 + else
42014 + return NULL;
42015 +}
42016 +
42017 +static struct inodev_entry *
42018 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
42019 +{
42020 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
42021 + struct inodev_entry *match;
42022 +
42023 + match = inodev_set.i_hash[index];
42024 +
42025 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
42026 + match = match->next;
42027 +
42028 + return match;
42029 +}
42030 +
42031 +static void
42032 +insert_inodev_entry(struct inodev_entry *entry)
42033 +{
42034 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42035 + inodev_set.i_size);
42036 + struct inodev_entry **curr;
42037 +
42038 + entry->prev = NULL;
42039 +
42040 + curr = &inodev_set.i_hash[index];
42041 + if (*curr != NULL)
42042 + (*curr)->prev = entry;
42043 +
42044 + entry->next = *curr;
42045 + *curr = entry;
42046 +
42047 + return;
42048 +}
42049 +
42050 +static void
42051 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42052 +{
42053 + unsigned int index =
42054 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42055 + struct acl_role_label **curr;
42056 + struct acl_role_label *tmp;
42057 +
42058 + curr = &acl_role_set.r_hash[index];
42059 +
42060 + /* if role was already inserted due to domains and already has
42061 + a role in the same bucket as it attached, then we need to
42062 + combine these two buckets
42063 + */
42064 + if (role->next) {
42065 + tmp = role->next;
42066 + while (tmp->next)
42067 + tmp = tmp->next;
42068 + tmp->next = *curr;
42069 + } else
42070 + role->next = *curr;
42071 + *curr = role;
42072 +
42073 + return;
42074 +}
42075 +
42076 +static void
42077 +insert_acl_role_label(struct acl_role_label *role)
42078 +{
42079 + int i;
42080 +
42081 + if (role_list == NULL) {
42082 + role_list = role;
42083 + role->prev = NULL;
42084 + } else {
42085 + role->prev = role_list;
42086 + role_list = role;
42087 + }
42088 +
42089 + /* used for hash chains */
42090 + role->next = NULL;
42091 +
42092 + if (role->roletype & GR_ROLE_DOMAIN) {
42093 + for (i = 0; i < role->domain_child_num; i++)
42094 + __insert_acl_role_label(role, role->domain_children[i]);
42095 + } else
42096 + __insert_acl_role_label(role, role->uidgid);
42097 +}
42098 +
42099 +static int
42100 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42101 +{
42102 + struct name_entry **curr, *nentry;
42103 + struct inodev_entry *ientry;
42104 + unsigned int len = strlen(name);
42105 + unsigned int key = full_name_hash(name, len);
42106 + unsigned int index = key % name_set.n_size;
42107 +
42108 + curr = &name_set.n_hash[index];
42109 +
42110 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42111 + curr = &((*curr)->next);
42112 +
42113 + if (*curr != NULL)
42114 + return 1;
42115 +
42116 + nentry = acl_alloc(sizeof (struct name_entry));
42117 + if (nentry == NULL)
42118 + return 0;
42119 + ientry = acl_alloc(sizeof (struct inodev_entry));
42120 + if (ientry == NULL)
42121 + return 0;
42122 + ientry->nentry = nentry;
42123 +
42124 + nentry->key = key;
42125 + nentry->name = name;
42126 + nentry->inode = inode;
42127 + nentry->device = device;
42128 + nentry->len = len;
42129 + nentry->deleted = deleted;
42130 +
42131 + nentry->prev = NULL;
42132 + curr = &name_set.n_hash[index];
42133 + if (*curr != NULL)
42134 + (*curr)->prev = nentry;
42135 + nentry->next = *curr;
42136 + *curr = nentry;
42137 +
42138 + /* insert us into the table searchable by inode/dev */
42139 + insert_inodev_entry(ientry);
42140 +
42141 + return 1;
42142 +}
42143 +
42144 +static void
42145 +insert_acl_obj_label(struct acl_object_label *obj,
42146 + struct acl_subject_label *subj)
42147 +{
42148 + unsigned int index =
42149 + fhash(obj->inode, obj->device, subj->obj_hash_size);
42150 + struct acl_object_label **curr;
42151 +
42152 +
42153 + obj->prev = NULL;
42154 +
42155 + curr = &subj->obj_hash[index];
42156 + if (*curr != NULL)
42157 + (*curr)->prev = obj;
42158 +
42159 + obj->next = *curr;
42160 + *curr = obj;
42161 +
42162 + return;
42163 +}
42164 +
42165 +static void
42166 +insert_acl_subj_label(struct acl_subject_label *obj,
42167 + struct acl_role_label *role)
42168 +{
42169 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42170 + struct acl_subject_label **curr;
42171 +
42172 + obj->prev = NULL;
42173 +
42174 + curr = &role->subj_hash[index];
42175 + if (*curr != NULL)
42176 + (*curr)->prev = obj;
42177 +
42178 + obj->next = *curr;
42179 + *curr = obj;
42180 +
42181 + return;
42182 +}
42183 +
42184 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42185 +
42186 +static void *
42187 +create_table(__u32 * len, int elementsize)
42188 +{
42189 + unsigned int table_sizes[] = {
42190 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42191 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42192 + 4194301, 8388593, 16777213, 33554393, 67108859
42193 + };
42194 + void *newtable = NULL;
42195 + unsigned int pwr = 0;
42196 +
42197 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42198 + table_sizes[pwr] <= *len)
42199 + pwr++;
42200 +
42201 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42202 + return newtable;
42203 +
42204 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42205 + newtable =
42206 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42207 + else
42208 + newtable = vmalloc(table_sizes[pwr] * elementsize);
42209 +
42210 + *len = table_sizes[pwr];
42211 +
42212 + return newtable;
42213 +}
42214 +
42215 +static int
42216 +init_variables(const struct gr_arg *arg)
42217 +{
42218 + struct task_struct *reaper = &init_task;
42219 + unsigned int stacksize;
42220 +
42221 + subj_map_set.s_size = arg->role_db.num_subjects;
42222 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42223 + name_set.n_size = arg->role_db.num_objects;
42224 + inodev_set.i_size = arg->role_db.num_objects;
42225 +
42226 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
42227 + !name_set.n_size || !inodev_set.i_size)
42228 + return 1;
42229 +
42230 + if (!gr_init_uidset())
42231 + return 1;
42232 +
42233 + /* set up the stack that holds allocation info */
42234 +
42235 + stacksize = arg->role_db.num_pointers + 5;
42236 +
42237 + if (!acl_alloc_stack_init(stacksize))
42238 + return 1;
42239 +
42240 + /* grab reference for the real root dentry and vfsmount */
42241 + get_fs_root(reaper->fs, &real_root);
42242 +
42243 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42244 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42245 +#endif
42246 +
42247 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42248 + if (fakefs_obj_rw == NULL)
42249 + return 1;
42250 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42251 +
42252 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42253 + if (fakefs_obj_rwx == NULL)
42254 + return 1;
42255 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42256 +
42257 + subj_map_set.s_hash =
42258 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42259 + acl_role_set.r_hash =
42260 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42261 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42262 + inodev_set.i_hash =
42263 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42264 +
42265 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42266 + !name_set.n_hash || !inodev_set.i_hash)
42267 + return 1;
42268 +
42269 + memset(subj_map_set.s_hash, 0,
42270 + sizeof(struct subject_map *) * subj_map_set.s_size);
42271 + memset(acl_role_set.r_hash, 0,
42272 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
42273 + memset(name_set.n_hash, 0,
42274 + sizeof (struct name_entry *) * name_set.n_size);
42275 + memset(inodev_set.i_hash, 0,
42276 + sizeof (struct inodev_entry *) * inodev_set.i_size);
42277 +
42278 + return 0;
42279 +}
42280 +
42281 +/* free information not needed after startup
42282 + currently contains user->kernel pointer mappings for subjects
42283 +*/
42284 +
42285 +static void
42286 +free_init_variables(void)
42287 +{
42288 + __u32 i;
42289 +
42290 + if (subj_map_set.s_hash) {
42291 + for (i = 0; i < subj_map_set.s_size; i++) {
42292 + if (subj_map_set.s_hash[i]) {
42293 + kfree(subj_map_set.s_hash[i]);
42294 + subj_map_set.s_hash[i] = NULL;
42295 + }
42296 + }
42297 +
42298 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42299 + PAGE_SIZE)
42300 + kfree(subj_map_set.s_hash);
42301 + else
42302 + vfree(subj_map_set.s_hash);
42303 + }
42304 +
42305 + return;
42306 +}
42307 +
42308 +static void
42309 +free_variables(void)
42310 +{
42311 + struct acl_subject_label *s;
42312 + struct acl_role_label *r;
42313 + struct task_struct *task, *task2;
42314 + unsigned int x;
42315 +
42316 + gr_clear_learn_entries();
42317 +
42318 + read_lock(&tasklist_lock);
42319 + do_each_thread(task2, task) {
42320 + task->acl_sp_role = 0;
42321 + task->acl_role_id = 0;
42322 + task->acl = NULL;
42323 + task->role = NULL;
42324 + } while_each_thread(task2, task);
42325 + read_unlock(&tasklist_lock);
42326 +
42327 + /* release the reference to the real root dentry and vfsmount */
42328 + path_put(&real_root);
42329 +
42330 + /* free all object hash tables */
42331 +
42332 + FOR_EACH_ROLE_START(r)
42333 + if (r->subj_hash == NULL)
42334 + goto next_role;
42335 + FOR_EACH_SUBJECT_START(r, s, x)
42336 + if (s->obj_hash == NULL)
42337 + break;
42338 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42339 + kfree(s->obj_hash);
42340 + else
42341 + vfree(s->obj_hash);
42342 + FOR_EACH_SUBJECT_END(s, x)
42343 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42344 + if (s->obj_hash == NULL)
42345 + break;
42346 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42347 + kfree(s->obj_hash);
42348 + else
42349 + vfree(s->obj_hash);
42350 + FOR_EACH_NESTED_SUBJECT_END(s)
42351 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42352 + kfree(r->subj_hash);
42353 + else
42354 + vfree(r->subj_hash);
42355 + r->subj_hash = NULL;
42356 +next_role:
42357 + FOR_EACH_ROLE_END(r)
42358 +
42359 + acl_free_all();
42360 +
42361 + if (acl_role_set.r_hash) {
42362 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42363 + PAGE_SIZE)
42364 + kfree(acl_role_set.r_hash);
42365 + else
42366 + vfree(acl_role_set.r_hash);
42367 + }
42368 + if (name_set.n_hash) {
42369 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42370 + PAGE_SIZE)
42371 + kfree(name_set.n_hash);
42372 + else
42373 + vfree(name_set.n_hash);
42374 + }
42375 +
42376 + if (inodev_set.i_hash) {
42377 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42378 + PAGE_SIZE)
42379 + kfree(inodev_set.i_hash);
42380 + else
42381 + vfree(inodev_set.i_hash);
42382 + }
42383 +
42384 + gr_free_uidset();
42385 +
42386 + memset(&name_set, 0, sizeof (struct name_db));
42387 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42388 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42389 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42390 +
42391 + default_role = NULL;
42392 + role_list = NULL;
42393 +
42394 + return;
42395 +}
42396 +
42397 +static __u32
42398 +count_user_objs(struct acl_object_label *userp)
42399 +{
42400 + struct acl_object_label o_tmp;
42401 + __u32 num = 0;
42402 +
42403 + while (userp) {
42404 + if (copy_from_user(&o_tmp, userp,
42405 + sizeof (struct acl_object_label)))
42406 + break;
42407 +
42408 + userp = o_tmp.prev;
42409 + num++;
42410 + }
42411 +
42412 + return num;
42413 +}
42414 +
42415 +static struct acl_subject_label *
42416 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42417 +
42418 +static int
42419 +copy_user_glob(struct acl_object_label *obj)
42420 +{
42421 + struct acl_object_label *g_tmp, **guser;
42422 + unsigned int len;
42423 + char *tmp;
42424 +
42425 + if (obj->globbed == NULL)
42426 + return 0;
42427 +
42428 + guser = &obj->globbed;
42429 + while (*guser) {
42430 + g_tmp = (struct acl_object_label *)
42431 + acl_alloc(sizeof (struct acl_object_label));
42432 + if (g_tmp == NULL)
42433 + return -ENOMEM;
42434 +
42435 + if (copy_from_user(g_tmp, *guser,
42436 + sizeof (struct acl_object_label)))
42437 + return -EFAULT;
42438 +
42439 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42440 +
42441 + if (!len || len >= PATH_MAX)
42442 + return -EINVAL;
42443 +
42444 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42445 + return -ENOMEM;
42446 +
42447 + if (copy_from_user(tmp, g_tmp->filename, len))
42448 + return -EFAULT;
42449 + tmp[len-1] = '\0';
42450 + g_tmp->filename = tmp;
42451 +
42452 + *guser = g_tmp;
42453 + guser = &(g_tmp->next);
42454 + }
42455 +
42456 + return 0;
42457 +}
42458 +
42459 +static int
42460 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42461 + struct acl_role_label *role)
42462 +{
42463 + struct acl_object_label *o_tmp;
42464 + unsigned int len;
42465 + int ret;
42466 + char *tmp;
42467 +
42468 + while (userp) {
42469 + if ((o_tmp = (struct acl_object_label *)
42470 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42471 + return -ENOMEM;
42472 +
42473 + if (copy_from_user(o_tmp, userp,
42474 + sizeof (struct acl_object_label)))
42475 + return -EFAULT;
42476 +
42477 + userp = o_tmp->prev;
42478 +
42479 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42480 +
42481 + if (!len || len >= PATH_MAX)
42482 + return -EINVAL;
42483 +
42484 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42485 + return -ENOMEM;
42486 +
42487 + if (copy_from_user(tmp, o_tmp->filename, len))
42488 + return -EFAULT;
42489 + tmp[len-1] = '\0';
42490 + o_tmp->filename = tmp;
42491 +
42492 + insert_acl_obj_label(o_tmp, subj);
42493 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42494 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42495 + return -ENOMEM;
42496 +
42497 + ret = copy_user_glob(o_tmp);
42498 + if (ret)
42499 + return ret;
42500 +
42501 + if (o_tmp->nested) {
42502 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42503 + if (IS_ERR(o_tmp->nested))
42504 + return PTR_ERR(o_tmp->nested);
42505 +
42506 + /* insert into nested subject list */
42507 + o_tmp->nested->next = role->hash->first;
42508 + role->hash->first = o_tmp->nested;
42509 + }
42510 + }
42511 +
42512 + return 0;
42513 +}
42514 +
42515 +static __u32
42516 +count_user_subjs(struct acl_subject_label *userp)
42517 +{
42518 + struct acl_subject_label s_tmp;
42519 + __u32 num = 0;
42520 +
42521 + while (userp) {
42522 + if (copy_from_user(&s_tmp, userp,
42523 + sizeof (struct acl_subject_label)))
42524 + break;
42525 +
42526 + userp = s_tmp.prev;
42527 + /* do not count nested subjects against this count, since
42528 + they are not included in the hash table, but are
42529 + attached to objects. We have already counted
42530 + the subjects in userspace for the allocation
42531 + stack
42532 + */
42533 + if (!(s_tmp.mode & GR_NESTED))
42534 + num++;
42535 + }
42536 +
42537 + return num;
42538 +}
42539 +
42540 +static int
42541 +copy_user_allowedips(struct acl_role_label *rolep)
42542 +{
42543 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42544 +
42545 + ruserip = rolep->allowed_ips;
42546 +
42547 + while (ruserip) {
42548 + rlast = rtmp;
42549 +
42550 + if ((rtmp = (struct role_allowed_ip *)
42551 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42552 + return -ENOMEM;
42553 +
42554 + if (copy_from_user(rtmp, ruserip,
42555 + sizeof (struct role_allowed_ip)))
42556 + return -EFAULT;
42557 +
42558 + ruserip = rtmp->prev;
42559 +
42560 + if (!rlast) {
42561 + rtmp->prev = NULL;
42562 + rolep->allowed_ips = rtmp;
42563 + } else {
42564 + rlast->next = rtmp;
42565 + rtmp->prev = rlast;
42566 + }
42567 +
42568 + if (!ruserip)
42569 + rtmp->next = NULL;
42570 + }
42571 +
42572 + return 0;
42573 +}
42574 +
42575 +static int
42576 +copy_user_transitions(struct acl_role_label *rolep)
42577 +{
42578 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42579 +
42580 + unsigned int len;
42581 + char *tmp;
42582 +
42583 + rusertp = rolep->transitions;
42584 +
42585 + while (rusertp) {
42586 + rlast = rtmp;
42587 +
42588 + if ((rtmp = (struct role_transition *)
42589 + acl_alloc(sizeof (struct role_transition))) == NULL)
42590 + return -ENOMEM;
42591 +
42592 + if (copy_from_user(rtmp, rusertp,
42593 + sizeof (struct role_transition)))
42594 + return -EFAULT;
42595 +
42596 + rusertp = rtmp->prev;
42597 +
42598 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42599 +
42600 + if (!len || len >= GR_SPROLE_LEN)
42601 + return -EINVAL;
42602 +
42603 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42604 + return -ENOMEM;
42605 +
42606 + if (copy_from_user(tmp, rtmp->rolename, len))
42607 + return -EFAULT;
42608 + tmp[len-1] = '\0';
42609 + rtmp->rolename = tmp;
42610 +
42611 + if (!rlast) {
42612 + rtmp->prev = NULL;
42613 + rolep->transitions = rtmp;
42614 + } else {
42615 + rlast->next = rtmp;
42616 + rtmp->prev = rlast;
42617 + }
42618 +
42619 + if (!rusertp)
42620 + rtmp->next = NULL;
42621 + }
42622 +
42623 + return 0;
42624 +}
42625 +
42626 +static struct acl_subject_label *
42627 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42628 +{
42629 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42630 + unsigned int len;
42631 + char *tmp;
42632 + __u32 num_objs;
42633 + struct acl_ip_label **i_tmp, *i_utmp2;
42634 + struct gr_hash_struct ghash;
42635 + struct subject_map *subjmap;
42636 + unsigned int i_num;
42637 + int err;
42638 +
42639 + s_tmp = lookup_subject_map(userp);
42640 +
42641 + /* we've already copied this subject into the kernel, just return
42642 + the reference to it, and don't copy it over again
42643 + */
42644 + if (s_tmp)
42645 + return(s_tmp);
42646 +
42647 + if ((s_tmp = (struct acl_subject_label *)
42648 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42649 + return ERR_PTR(-ENOMEM);
42650 +
42651 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42652 + if (subjmap == NULL)
42653 + return ERR_PTR(-ENOMEM);
42654 +
42655 + subjmap->user = userp;
42656 + subjmap->kernel = s_tmp;
42657 + insert_subj_map_entry(subjmap);
42658 +
42659 + if (copy_from_user(s_tmp, userp,
42660 + sizeof (struct acl_subject_label)))
42661 + return ERR_PTR(-EFAULT);
42662 +
42663 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42664 +
42665 + if (!len || len >= PATH_MAX)
42666 + return ERR_PTR(-EINVAL);
42667 +
42668 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42669 + return ERR_PTR(-ENOMEM);
42670 +
42671 + if (copy_from_user(tmp, s_tmp->filename, len))
42672 + return ERR_PTR(-EFAULT);
42673 + tmp[len-1] = '\0';
42674 + s_tmp->filename = tmp;
42675 +
42676 + if (!strcmp(s_tmp->filename, "/"))
42677 + role->root_label = s_tmp;
42678 +
42679 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42680 + return ERR_PTR(-EFAULT);
42681 +
42682 + /* copy user and group transition tables */
42683 +
42684 + if (s_tmp->user_trans_num) {
42685 + uid_t *uidlist;
42686 +
42687 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42688 + if (uidlist == NULL)
42689 + return ERR_PTR(-ENOMEM);
42690 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42691 + return ERR_PTR(-EFAULT);
42692 +
42693 + s_tmp->user_transitions = uidlist;
42694 + }
42695 +
42696 + if (s_tmp->group_trans_num) {
42697 + gid_t *gidlist;
42698 +
42699 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42700 + if (gidlist == NULL)
42701 + return ERR_PTR(-ENOMEM);
42702 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42703 + return ERR_PTR(-EFAULT);
42704 +
42705 + s_tmp->group_transitions = gidlist;
42706 + }
42707 +
42708 + /* set up object hash table */
42709 + num_objs = count_user_objs(ghash.first);
42710 +
42711 + s_tmp->obj_hash_size = num_objs;
42712 + s_tmp->obj_hash =
42713 + (struct acl_object_label **)
42714 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42715 +
42716 + if (!s_tmp->obj_hash)
42717 + return ERR_PTR(-ENOMEM);
42718 +
42719 + memset(s_tmp->obj_hash, 0,
42720 + s_tmp->obj_hash_size *
42721 + sizeof (struct acl_object_label *));
42722 +
42723 + /* add in objects */
42724 + err = copy_user_objs(ghash.first, s_tmp, role);
42725 +
42726 + if (err)
42727 + return ERR_PTR(err);
42728 +
42729 + /* set pointer for parent subject */
42730 + if (s_tmp->parent_subject) {
42731 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42732 +
42733 + if (IS_ERR(s_tmp2))
42734 + return s_tmp2;
42735 +
42736 + s_tmp->parent_subject = s_tmp2;
42737 + }
42738 +
42739 + /* add in ip acls */
42740 +
42741 + if (!s_tmp->ip_num) {
42742 + s_tmp->ips = NULL;
42743 + goto insert;
42744 + }
42745 +
42746 + i_tmp =
42747 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42748 + sizeof (struct acl_ip_label *));
42749 +
42750 + if (!i_tmp)
42751 + return ERR_PTR(-ENOMEM);
42752 +
42753 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42754 + *(i_tmp + i_num) =
42755 + (struct acl_ip_label *)
42756 + acl_alloc(sizeof (struct acl_ip_label));
42757 + if (!*(i_tmp + i_num))
42758 + return ERR_PTR(-ENOMEM);
42759 +
42760 + if (copy_from_user
42761 + (&i_utmp2, s_tmp->ips + i_num,
42762 + sizeof (struct acl_ip_label *)))
42763 + return ERR_PTR(-EFAULT);
42764 +
42765 + if (copy_from_user
42766 + (*(i_tmp + i_num), i_utmp2,
42767 + sizeof (struct acl_ip_label)))
42768 + return ERR_PTR(-EFAULT);
42769 +
42770 + if ((*(i_tmp + i_num))->iface == NULL)
42771 + continue;
42772 +
42773 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42774 + if (!len || len >= IFNAMSIZ)
42775 + return ERR_PTR(-EINVAL);
42776 + tmp = acl_alloc(len);
42777 + if (tmp == NULL)
42778 + return ERR_PTR(-ENOMEM);
42779 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42780 + return ERR_PTR(-EFAULT);
42781 + (*(i_tmp + i_num))->iface = tmp;
42782 + }
42783 +
42784 + s_tmp->ips = i_tmp;
42785 +
42786 +insert:
42787 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42788 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42789 + return ERR_PTR(-ENOMEM);
42790 +
42791 + return s_tmp;
42792 +}
42793 +
42794 +static int
42795 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42796 +{
42797 + struct acl_subject_label s_pre;
42798 + struct acl_subject_label * ret;
42799 + int err;
42800 +
42801 + while (userp) {
42802 + if (copy_from_user(&s_pre, userp,
42803 + sizeof (struct acl_subject_label)))
42804 + return -EFAULT;
42805 +
42806 + /* do not add nested subjects here, add
42807 + while parsing objects
42808 + */
42809 +
42810 + if (s_pre.mode & GR_NESTED) {
42811 + userp = s_pre.prev;
42812 + continue;
42813 + }
42814 +
42815 + ret = do_copy_user_subj(userp, role);
42816 +
42817 + err = PTR_ERR(ret);
42818 + if (IS_ERR(ret))
42819 + return err;
42820 +
42821 + insert_acl_subj_label(ret, role);
42822 +
42823 + userp = s_pre.prev;
42824 + }
42825 +
42826 + return 0;
42827 +}
42828 +
42829 +static int
42830 +copy_user_acl(struct gr_arg *arg)
42831 +{
42832 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42833 + struct sprole_pw *sptmp;
42834 + struct gr_hash_struct *ghash;
42835 + uid_t *domainlist;
42836 + unsigned int r_num;
42837 + unsigned int len;
42838 + char *tmp;
42839 + int err = 0;
42840 + __u16 i;
42841 + __u32 num_subjs;
42842 +
42843 + /* we need a default and kernel role */
42844 + if (arg->role_db.num_roles < 2)
42845 + return -EINVAL;
42846 +
42847 + /* copy special role authentication info from userspace */
42848 +
42849 + num_sprole_pws = arg->num_sprole_pws;
42850 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42851 +
42852 + if (!acl_special_roles) {
42853 + err = -ENOMEM;
42854 + goto cleanup;
42855 + }
42856 +
42857 + for (i = 0; i < num_sprole_pws; i++) {
42858 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42859 + if (!sptmp) {
42860 + err = -ENOMEM;
42861 + goto cleanup;
42862 + }
42863 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42864 + sizeof (struct sprole_pw))) {
42865 + err = -EFAULT;
42866 + goto cleanup;
42867 + }
42868 +
42869 + len =
42870 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42871 +
42872 + if (!len || len >= GR_SPROLE_LEN) {
42873 + err = -EINVAL;
42874 + goto cleanup;
42875 + }
42876 +
42877 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42878 + err = -ENOMEM;
42879 + goto cleanup;
42880 + }
42881 +
42882 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42883 + err = -EFAULT;
42884 + goto cleanup;
42885 + }
42886 + tmp[len-1] = '\0';
42887 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42888 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42889 +#endif
42890 + sptmp->rolename = tmp;
42891 + acl_special_roles[i] = sptmp;
42892 + }
42893 +
42894 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42895 +
42896 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42897 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42898 +
42899 + if (!r_tmp) {
42900 + err = -ENOMEM;
42901 + goto cleanup;
42902 + }
42903 +
42904 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42905 + sizeof (struct acl_role_label *))) {
42906 + err = -EFAULT;
42907 + goto cleanup;
42908 + }
42909 +
42910 + if (copy_from_user(r_tmp, r_utmp2,
42911 + sizeof (struct acl_role_label))) {
42912 + err = -EFAULT;
42913 + goto cleanup;
42914 + }
42915 +
42916 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
42917 +
42918 + if (!len || len >= PATH_MAX) {
42919 + err = -EINVAL;
42920 + goto cleanup;
42921 + }
42922 +
42923 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42924 + err = -ENOMEM;
42925 + goto cleanup;
42926 + }
42927 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
42928 + err = -EFAULT;
42929 + goto cleanup;
42930 + }
42931 + tmp[len-1] = '\0';
42932 + r_tmp->rolename = tmp;
42933 +
42934 + if (!strcmp(r_tmp->rolename, "default")
42935 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
42936 + default_role = r_tmp;
42937 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
42938 + kernel_role = r_tmp;
42939 + }
42940 +
42941 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
42942 + err = -ENOMEM;
42943 + goto cleanup;
42944 + }
42945 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
42946 + err = -EFAULT;
42947 + goto cleanup;
42948 + }
42949 +
42950 + r_tmp->hash = ghash;
42951 +
42952 + num_subjs = count_user_subjs(r_tmp->hash->first);
42953 +
42954 + r_tmp->subj_hash_size = num_subjs;
42955 + r_tmp->subj_hash =
42956 + (struct acl_subject_label **)
42957 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
42958 +
42959 + if (!r_tmp->subj_hash) {
42960 + err = -ENOMEM;
42961 + goto cleanup;
42962 + }
42963 +
42964 + err = copy_user_allowedips(r_tmp);
42965 + if (err)
42966 + goto cleanup;
42967 +
42968 + /* copy domain info */
42969 + if (r_tmp->domain_children != NULL) {
42970 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
42971 + if (domainlist == NULL) {
42972 + err = -ENOMEM;
42973 + goto cleanup;
42974 + }
42975 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
42976 + err = -EFAULT;
42977 + goto cleanup;
42978 + }
42979 + r_tmp->domain_children = domainlist;
42980 + }
42981 +
42982 + err = copy_user_transitions(r_tmp);
42983 + if (err)
42984 + goto cleanup;
42985 +
42986 + memset(r_tmp->subj_hash, 0,
42987 + r_tmp->subj_hash_size *
42988 + sizeof (struct acl_subject_label *));
42989 +
42990 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
42991 +
42992 + if (err)
42993 + goto cleanup;
42994 +
42995 + /* set nested subject list to null */
42996 + r_tmp->hash->first = NULL;
42997 +
42998 + insert_acl_role_label(r_tmp);
42999 + }
43000 +
43001 + goto return_err;
43002 + cleanup:
43003 + free_variables();
43004 + return_err:
43005 + return err;
43006 +
43007 +}
43008 +
43009 +static int
43010 +gracl_init(struct gr_arg *args)
43011 +{
43012 + int error = 0;
43013 +
43014 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
43015 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
43016 +
43017 + if (init_variables(args)) {
43018 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
43019 + error = -ENOMEM;
43020 + free_variables();
43021 + goto out;
43022 + }
43023 +
43024 + error = copy_user_acl(args);
43025 + free_init_variables();
43026 + if (error) {
43027 + free_variables();
43028 + goto out;
43029 + }
43030 +
43031 + if ((error = gr_set_acls(0))) {
43032 + free_variables();
43033 + goto out;
43034 + }
43035 +
43036 + pax_open_kernel();
43037 + gr_status |= GR_READY;
43038 + pax_close_kernel();
43039 +
43040 + out:
43041 + return error;
43042 +}
43043 +
43044 +/* derived from glibc fnmatch() 0: match, 1: no match*/
43045 +
43046 +static int
43047 +glob_match(const char *p, const char *n)
43048 +{
43049 + char c;
43050 +
43051 + while ((c = *p++) != '\0') {
43052 + switch (c) {
43053 + case '?':
43054 + if (*n == '\0')
43055 + return 1;
43056 + else if (*n == '/')
43057 + return 1;
43058 + break;
43059 + case '\\':
43060 + if (*n != c)
43061 + return 1;
43062 + break;
43063 + case '*':
43064 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
43065 + if (*n == '/')
43066 + return 1;
43067 + else if (c == '?') {
43068 + if (*n == '\0')
43069 + return 1;
43070 + else
43071 + ++n;
43072 + }
43073 + }
43074 + if (c == '\0') {
43075 + return 0;
43076 + } else {
43077 + const char *endp;
43078 +
43079 + if ((endp = strchr(n, '/')) == NULL)
43080 + endp = n + strlen(n);
43081 +
43082 + if (c == '[') {
43083 + for (--p; n < endp; ++n)
43084 + if (!glob_match(p, n))
43085 + return 0;
43086 + } else if (c == '/') {
43087 + while (*n != '\0' && *n != '/')
43088 + ++n;
43089 + if (*n == '/' && !glob_match(p, n + 1))
43090 + return 0;
43091 + } else {
43092 + for (--p; n < endp; ++n)
43093 + if (*n == c && !glob_match(p, n))
43094 + return 0;
43095 + }
43096 +
43097 + return 1;
43098 + }
43099 + case '[':
43100 + {
43101 + int not;
43102 + char cold;
43103 +
43104 + if (*n == '\0' || *n == '/')
43105 + return 1;
43106 +
43107 + not = (*p == '!' || *p == '^');
43108 + if (not)
43109 + ++p;
43110 +
43111 + c = *p++;
43112 + for (;;) {
43113 + unsigned char fn = (unsigned char)*n;
43114 +
43115 + if (c == '\0')
43116 + return 1;
43117 + else {
43118 + if (c == fn)
43119 + goto matched;
43120 + cold = c;
43121 + c = *p++;
43122 +
43123 + if (c == '-' && *p != ']') {
43124 + unsigned char cend = *p++;
43125 +
43126 + if (cend == '\0')
43127 + return 1;
43128 +
43129 + if (cold <= fn && fn <= cend)
43130 + goto matched;
43131 +
43132 + c = *p++;
43133 + }
43134 + }
43135 +
43136 + if (c == ']')
43137 + break;
43138 + }
43139 + if (!not)
43140 + return 1;
43141 + break;
43142 + matched:
43143 + while (c != ']') {
43144 + if (c == '\0')
43145 + return 1;
43146 +
43147 + c = *p++;
43148 + }
43149 + if (not)
43150 + return 1;
43151 + }
43152 + break;
43153 + default:
43154 + if (c != *n)
43155 + return 1;
43156 + }
43157 +
43158 + ++n;
43159 + }
43160 +
43161 + if (*n == '\0')
43162 + return 0;
43163 +
43164 + if (*n == '/')
43165 + return 0;
43166 +
43167 + return 1;
43168 +}
43169 +
43170 +static struct acl_object_label *
43171 +chk_glob_label(struct acl_object_label *globbed,
43172 + struct dentry *dentry, struct vfsmount *mnt, char **path)
43173 +{
43174 + struct acl_object_label *tmp;
43175 +
43176 + if (*path == NULL)
43177 + *path = gr_to_filename_nolock(dentry, mnt);
43178 +
43179 + tmp = globbed;
43180 +
43181 + while (tmp) {
43182 + if (!glob_match(tmp->filename, *path))
43183 + return tmp;
43184 + tmp = tmp->next;
43185 + }
43186 +
43187 + return NULL;
43188 +}
43189 +
43190 +static struct acl_object_label *
43191 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43192 + const ino_t curr_ino, const dev_t curr_dev,
43193 + const struct acl_subject_label *subj, char **path, const int checkglob)
43194 +{
43195 + struct acl_subject_label *tmpsubj;
43196 + struct acl_object_label *retval;
43197 + struct acl_object_label *retval2;
43198 +
43199 + tmpsubj = (struct acl_subject_label *) subj;
43200 + read_lock(&gr_inode_lock);
43201 + do {
43202 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43203 + if (retval) {
43204 + if (checkglob && retval->globbed) {
43205 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43206 + (struct vfsmount *)orig_mnt, path);
43207 + if (retval2)
43208 + retval = retval2;
43209 + }
43210 + break;
43211 + }
43212 + } while ((tmpsubj = tmpsubj->parent_subject));
43213 + read_unlock(&gr_inode_lock);
43214 +
43215 + return retval;
43216 +}
43217 +
43218 +static __inline__ struct acl_object_label *
43219 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43220 + struct dentry *curr_dentry,
43221 + const struct acl_subject_label *subj, char **path, const int checkglob)
43222 +{
43223 + int newglob = checkglob;
43224 + ino_t inode;
43225 + dev_t device;
43226 +
43227 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43228 + as we don't want a / * rule to match instead of the / object
43229 + don't do this for create lookups that call this function though, since they're looking up
43230 + on the parent and thus need globbing checks on all paths
43231 + */
43232 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43233 + newglob = GR_NO_GLOB;
43234 +
43235 + spin_lock(&curr_dentry->d_lock);
43236 + inode = curr_dentry->d_inode->i_ino;
43237 + device = __get_dev(curr_dentry);
43238 + spin_unlock(&curr_dentry->d_lock);
43239 +
43240 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43241 +}
43242 +
43243 +static struct acl_object_label *
43244 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43245 + const struct acl_subject_label *subj, char *path, const int checkglob)
43246 +{
43247 + struct dentry *dentry = (struct dentry *) l_dentry;
43248 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43249 + struct acl_object_label *retval;
43250 + struct dentry *parent;
43251 +
43252 + write_seqlock(&rename_lock);
43253 + br_read_lock(vfsmount_lock);
43254 +
43255 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43256 +#ifdef CONFIG_NET
43257 + mnt == sock_mnt ||
43258 +#endif
43259 +#ifdef CONFIG_HUGETLBFS
43260 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43261 +#endif
43262 + /* ignore Eric Biederman */
43263 + IS_PRIVATE(l_dentry->d_inode))) {
43264 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43265 + goto out;
43266 + }
43267 +
43268 + for (;;) {
43269 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43270 + break;
43271 +
43272 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43273 + if (mnt->mnt_parent == mnt)
43274 + break;
43275 +
43276 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43277 + if (retval != NULL)
43278 + goto out;
43279 +
43280 + dentry = mnt->mnt_mountpoint;
43281 + mnt = mnt->mnt_parent;
43282 + continue;
43283 + }
43284 +
43285 + parent = dentry->d_parent;
43286 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43287 + if (retval != NULL)
43288 + goto out;
43289 +
43290 + dentry = parent;
43291 + }
43292 +
43293 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43294 +
43295 + /* real_root is pinned so we don't have to hold a reference */
43296 + if (retval == NULL)
43297 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43298 +out:
43299 + br_read_unlock(vfsmount_lock);
43300 + write_sequnlock(&rename_lock);
43301 +
43302 + BUG_ON(retval == NULL);
43303 +
43304 + return retval;
43305 +}
43306 +
43307 +static __inline__ struct acl_object_label *
43308 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43309 + const struct acl_subject_label *subj)
43310 +{
43311 + char *path = NULL;
43312 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43313 +}
43314 +
43315 +static __inline__ struct acl_object_label *
43316 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43317 + const struct acl_subject_label *subj)
43318 +{
43319 + char *path = NULL;
43320 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43321 +}
43322 +
43323 +static __inline__ struct acl_object_label *
43324 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43325 + const struct acl_subject_label *subj, char *path)
43326 +{
43327 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43328 +}
43329 +
43330 +static struct acl_subject_label *
43331 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43332 + const struct acl_role_label *role)
43333 +{
43334 + struct dentry *dentry = (struct dentry *) l_dentry;
43335 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43336 + struct acl_subject_label *retval;
43337 + struct dentry *parent;
43338 +
43339 + write_seqlock(&rename_lock);
43340 + br_read_lock(vfsmount_lock);
43341 +
43342 + for (;;) {
43343 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43344 + break;
43345 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43346 + if (mnt->mnt_parent == mnt)
43347 + break;
43348 +
43349 + spin_lock(&dentry->d_lock);
43350 + read_lock(&gr_inode_lock);
43351 + retval =
43352 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43353 + __get_dev(dentry), role);
43354 + read_unlock(&gr_inode_lock);
43355 + spin_unlock(&dentry->d_lock);
43356 + if (retval != NULL)
43357 + goto out;
43358 +
43359 + dentry = mnt->mnt_mountpoint;
43360 + mnt = mnt->mnt_parent;
43361 + continue;
43362 + }
43363 +
43364 + spin_lock(&dentry->d_lock);
43365 + read_lock(&gr_inode_lock);
43366 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43367 + __get_dev(dentry), role);
43368 + read_unlock(&gr_inode_lock);
43369 + parent = dentry->d_parent;
43370 + spin_unlock(&dentry->d_lock);
43371 +
43372 + if (retval != NULL)
43373 + goto out;
43374 +
43375 + dentry = parent;
43376 + }
43377 +
43378 + spin_lock(&dentry->d_lock);
43379 + read_lock(&gr_inode_lock);
43380 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43381 + __get_dev(dentry), role);
43382 + read_unlock(&gr_inode_lock);
43383 + spin_unlock(&dentry->d_lock);
43384 +
43385 + if (unlikely(retval == NULL)) {
43386 + /* real_root is pinned, we don't need to hold a reference */
43387 + read_lock(&gr_inode_lock);
43388 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43389 + __get_dev(real_root.dentry), role);
43390 + read_unlock(&gr_inode_lock);
43391 + }
43392 +out:
43393 + br_read_unlock(vfsmount_lock);
43394 + write_sequnlock(&rename_lock);
43395 +
43396 + BUG_ON(retval == NULL);
43397 +
43398 + return retval;
43399 +}
43400 +
43401 +static void
43402 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43403 +{
43404 + struct task_struct *task = current;
43405 + const struct cred *cred = current_cred();
43406 +
43407 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43408 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43409 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43410 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43411 +
43412 + return;
43413 +}
43414 +
43415 +static void
43416 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43417 +{
43418 + struct task_struct *task = current;
43419 + const struct cred *cred = current_cred();
43420 +
43421 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43422 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43423 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43424 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43425 +
43426 + return;
43427 +}
43428 +
43429 +static void
43430 +gr_log_learn_id_change(const char type, const unsigned int real,
43431 + const unsigned int effective, const unsigned int fs)
43432 +{
43433 + struct task_struct *task = current;
43434 + const struct cred *cred = current_cred();
43435 +
43436 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43437 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43438 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43439 + type, real, effective, fs, &task->signal->saved_ip);
43440 +
43441 + return;
43442 +}
43443 +
43444 +__u32
43445 +gr_check_link(const struct dentry * new_dentry,
43446 + const struct dentry * parent_dentry,
43447 + const struct vfsmount * parent_mnt,
43448 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43449 +{
43450 + struct acl_object_label *obj;
43451 + __u32 oldmode, newmode;
43452 + __u32 needmode;
43453 +
43454 + if (unlikely(!(gr_status & GR_READY)))
43455 + return (GR_CREATE | GR_LINK);
43456 +
43457 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43458 + oldmode = obj->mode;
43459 +
43460 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43461 + oldmode |= (GR_CREATE | GR_LINK);
43462 +
43463 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43464 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43465 + needmode |= GR_SETID | GR_AUDIT_SETID;
43466 +
43467 + newmode =
43468 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43469 + oldmode | needmode);
43470 +
43471 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43472 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43473 + GR_INHERIT | GR_AUDIT_INHERIT);
43474 +
43475 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43476 + goto bad;
43477 +
43478 + if ((oldmode & needmode) != needmode)
43479 + goto bad;
43480 +
43481 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43482 + if ((newmode & needmode) != needmode)
43483 + goto bad;
43484 +
43485 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43486 + return newmode;
43487 +bad:
43488 + needmode = oldmode;
43489 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43490 + needmode |= GR_SETID;
43491 +
43492 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43493 + gr_log_learn(old_dentry, old_mnt, needmode);
43494 + return (GR_CREATE | GR_LINK);
43495 + } else if (newmode & GR_SUPPRESS)
43496 + return GR_SUPPRESS;
43497 + else
43498 + return 0;
43499 +}
43500 +
43501 +__u32
43502 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43503 + const struct vfsmount * mnt)
43504 +{
43505 + __u32 retval = mode;
43506 + struct acl_subject_label *curracl;
43507 + struct acl_object_label *currobj;
43508 +
43509 + if (unlikely(!(gr_status & GR_READY)))
43510 + return (mode & ~GR_AUDITS);
43511 +
43512 + curracl = current->acl;
43513 +
43514 + currobj = chk_obj_label(dentry, mnt, curracl);
43515 + retval = currobj->mode & mode;
43516 +
43517 + /* if we're opening a specified transfer file for writing
43518 + (e.g. /dev/initctl), then transfer our role to init
43519 + */
43520 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43521 + current->role->roletype & GR_ROLE_PERSIST)) {
43522 + struct task_struct *task = init_pid_ns.child_reaper;
43523 +
43524 + if (task->role != current->role) {
43525 + task->acl_sp_role = 0;
43526 + task->acl_role_id = current->acl_role_id;
43527 + task->role = current->role;
43528 + rcu_read_lock();
43529 + read_lock(&grsec_exec_file_lock);
43530 + gr_apply_subject_to_task(task);
43531 + read_unlock(&grsec_exec_file_lock);
43532 + rcu_read_unlock();
43533 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43534 + }
43535 + }
43536 +
43537 + if (unlikely
43538 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43539 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43540 + __u32 new_mode = mode;
43541 +
43542 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43543 +
43544 + retval = new_mode;
43545 +
43546 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43547 + new_mode |= GR_INHERIT;
43548 +
43549 + if (!(mode & GR_NOLEARN))
43550 + gr_log_learn(dentry, mnt, new_mode);
43551 + }
43552 +
43553 + return retval;
43554 +}
43555 +
43556 +__u32
43557 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43558 + const struct vfsmount * mnt, const __u32 mode)
43559 +{
43560 + struct name_entry *match;
43561 + struct acl_object_label *matchpo;
43562 + struct acl_subject_label *curracl;
43563 + char *path;
43564 + __u32 retval;
43565 +
43566 + if (unlikely(!(gr_status & GR_READY)))
43567 + return (mode & ~GR_AUDITS);
43568 +
43569 + preempt_disable();
43570 + path = gr_to_filename_rbac(new_dentry, mnt);
43571 + match = lookup_name_entry_create(path);
43572 +
43573 + if (!match)
43574 + goto check_parent;
43575 +
43576 + curracl = current->acl;
43577 +
43578 + read_lock(&gr_inode_lock);
43579 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43580 + read_unlock(&gr_inode_lock);
43581 +
43582 + if (matchpo) {
43583 + if ((matchpo->mode & mode) !=
43584 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43585 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43586 + __u32 new_mode = mode;
43587 +
43588 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43589 +
43590 + gr_log_learn(new_dentry, mnt, new_mode);
43591 +
43592 + preempt_enable();
43593 + return new_mode;
43594 + }
43595 + preempt_enable();
43596 + return (matchpo->mode & mode);
43597 + }
43598 +
43599 + check_parent:
43600 + curracl = current->acl;
43601 +
43602 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43603 + retval = matchpo->mode & mode;
43604 +
43605 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43606 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43607 + __u32 new_mode = mode;
43608 +
43609 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43610 +
43611 + gr_log_learn(new_dentry, mnt, new_mode);
43612 + preempt_enable();
43613 + return new_mode;
43614 + }
43615 +
43616 + preempt_enable();
43617 + return retval;
43618 +}
43619 +
43620 +int
43621 +gr_check_hidden_task(const struct task_struct *task)
43622 +{
43623 + if (unlikely(!(gr_status & GR_READY)))
43624 + return 0;
43625 +
43626 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43627 + return 1;
43628 +
43629 + return 0;
43630 +}
43631 +
43632 +int
43633 +gr_check_protected_task(const struct task_struct *task)
43634 +{
43635 + if (unlikely(!(gr_status & GR_READY) || !task))
43636 + return 0;
43637 +
43638 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43639 + task->acl != current->acl)
43640 + return 1;
43641 +
43642 + return 0;
43643 +}
43644 +
43645 +int
43646 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43647 +{
43648 + struct task_struct *p;
43649 + int ret = 0;
43650 +
43651 + if (unlikely(!(gr_status & GR_READY) || !pid))
43652 + return ret;
43653 +
43654 + read_lock(&tasklist_lock);
43655 + do_each_pid_task(pid, type, p) {
43656 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43657 + p->acl != current->acl) {
43658 + ret = 1;
43659 + goto out;
43660 + }
43661 + } while_each_pid_task(pid, type, p);
43662 +out:
43663 + read_unlock(&tasklist_lock);
43664 +
43665 + return ret;
43666 +}
43667 +
43668 +void
43669 +gr_copy_label(struct task_struct *tsk)
43670 +{
43671 + tsk->signal->used_accept = 0;
43672 + tsk->acl_sp_role = 0;
43673 + tsk->acl_role_id = current->acl_role_id;
43674 + tsk->acl = current->acl;
43675 + tsk->role = current->role;
43676 + tsk->signal->curr_ip = current->signal->curr_ip;
43677 + tsk->signal->saved_ip = current->signal->saved_ip;
43678 + if (current->exec_file)
43679 + get_file(current->exec_file);
43680 + tsk->exec_file = current->exec_file;
43681 + tsk->is_writable = current->is_writable;
43682 + if (unlikely(current->signal->used_accept)) {
43683 + current->signal->curr_ip = 0;
43684 + current->signal->saved_ip = 0;
43685 + }
43686 +
43687 + return;
43688 +}
43689 +
43690 +static void
43691 +gr_set_proc_res(struct task_struct *task)
43692 +{
43693 + struct acl_subject_label *proc;
43694 + unsigned short i;
43695 +
43696 + proc = task->acl;
43697 +
43698 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43699 + return;
43700 +
43701 + for (i = 0; i < RLIM_NLIMITS; i++) {
43702 + if (!(proc->resmask & (1 << i)))
43703 + continue;
43704 +
43705 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43706 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43707 + }
43708 +
43709 + return;
43710 +}
43711 +
43712 +extern int __gr_process_user_ban(struct user_struct *user);
43713 +
43714 +int
43715 +gr_check_user_change(int real, int effective, int fs)
43716 +{
43717 + unsigned int i;
43718 + __u16 num;
43719 + uid_t *uidlist;
43720 + int curuid;
43721 + int realok = 0;
43722 + int effectiveok = 0;
43723 + int fsok = 0;
43724 +
43725 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43726 + struct user_struct *user;
43727 +
43728 + if (real == -1)
43729 + goto skipit;
43730 +
43731 + user = find_user(real);
43732 + if (user == NULL)
43733 + goto skipit;
43734 +
43735 + if (__gr_process_user_ban(user)) {
43736 + /* for find_user */
43737 + free_uid(user);
43738 + return 1;
43739 + }
43740 +
43741 + /* for find_user */
43742 + free_uid(user);
43743 +
43744 +skipit:
43745 +#endif
43746 +
43747 + if (unlikely(!(gr_status & GR_READY)))
43748 + return 0;
43749 +
43750 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43751 + gr_log_learn_id_change('u', real, effective, fs);
43752 +
43753 + num = current->acl->user_trans_num;
43754 + uidlist = current->acl->user_transitions;
43755 +
43756 + if (uidlist == NULL)
43757 + return 0;
43758 +
43759 + if (real == -1)
43760 + realok = 1;
43761 + if (effective == -1)
43762 + effectiveok = 1;
43763 + if (fs == -1)
43764 + fsok = 1;
43765 +
43766 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43767 + for (i = 0; i < num; i++) {
43768 + curuid = (int)uidlist[i];
43769 + if (real == curuid)
43770 + realok = 1;
43771 + if (effective == curuid)
43772 + effectiveok = 1;
43773 + if (fs == curuid)
43774 + fsok = 1;
43775 + }
43776 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43777 + for (i = 0; i < num; i++) {
43778 + curuid = (int)uidlist[i];
43779 + if (real == curuid)
43780 + break;
43781 + if (effective == curuid)
43782 + break;
43783 + if (fs == curuid)
43784 + break;
43785 + }
43786 + /* not in deny list */
43787 + if (i == num) {
43788 + realok = 1;
43789 + effectiveok = 1;
43790 + fsok = 1;
43791 + }
43792 + }
43793 +
43794 + if (realok && effectiveok && fsok)
43795 + return 0;
43796 + else {
43797 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43798 + return 1;
43799 + }
43800 +}
43801 +
43802 +int
43803 +gr_check_group_change(int real, int effective, int fs)
43804 +{
43805 + unsigned int i;
43806 + __u16 num;
43807 + gid_t *gidlist;
43808 + int curgid;
43809 + int realok = 0;
43810 + int effectiveok = 0;
43811 + int fsok = 0;
43812 +
43813 + if (unlikely(!(gr_status & GR_READY)))
43814 + return 0;
43815 +
43816 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43817 + gr_log_learn_id_change('g', real, effective, fs);
43818 +
43819 + num = current->acl->group_trans_num;
43820 + gidlist = current->acl->group_transitions;
43821 +
43822 + if (gidlist == NULL)
43823 + return 0;
43824 +
43825 + if (real == -1)
43826 + realok = 1;
43827 + if (effective == -1)
43828 + effectiveok = 1;
43829 + if (fs == -1)
43830 + fsok = 1;
43831 +
43832 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43833 + for (i = 0; i < num; i++) {
43834 + curgid = (int)gidlist[i];
43835 + if (real == curgid)
43836 + realok = 1;
43837 + if (effective == curgid)
43838 + effectiveok = 1;
43839 + if (fs == curgid)
43840 + fsok = 1;
43841 + }
43842 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43843 + for (i = 0; i < num; i++) {
43844 + curgid = (int)gidlist[i];
43845 + if (real == curgid)
43846 + break;
43847 + if (effective == curgid)
43848 + break;
43849 + if (fs == curgid)
43850 + break;
43851 + }
43852 + /* not in deny list */
43853 + if (i == num) {
43854 + realok = 1;
43855 + effectiveok = 1;
43856 + fsok = 1;
43857 + }
43858 + }
43859 +
43860 + if (realok && effectiveok && fsok)
43861 + return 0;
43862 + else {
43863 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43864 + return 1;
43865 + }
43866 +}
43867 +
43868 +void
43869 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43870 +{
43871 + struct acl_role_label *role = task->role;
43872 + struct acl_subject_label *subj = NULL;
43873 + struct acl_object_label *obj;
43874 + struct file *filp;
43875 +
43876 + if (unlikely(!(gr_status & GR_READY)))
43877 + return;
43878 +
43879 + filp = task->exec_file;
43880 +
43881 + /* kernel process, we'll give them the kernel role */
43882 + if (unlikely(!filp)) {
43883 + task->role = kernel_role;
43884 + task->acl = kernel_role->root_label;
43885 + return;
43886 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43887 + role = lookup_acl_role_label(task, uid, gid);
43888 +
43889 + /* perform subject lookup in possibly new role
43890 + we can use this result below in the case where role == task->role
43891 + */
43892 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43893 +
43894 + /* if we changed uid/gid, but result in the same role
43895 + and are using inheritance, don't lose the inherited subject
43896 + if current subject is other than what normal lookup
43897 + would result in, we arrived via inheritance, don't
43898 + lose subject
43899 + */
43900 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43901 + (subj == task->acl)))
43902 + task->acl = subj;
43903 +
43904 + task->role = role;
43905 +
43906 + task->is_writable = 0;
43907 +
43908 + /* ignore additional mmap checks for processes that are writable
43909 + by the default ACL */
43910 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43911 + if (unlikely(obj->mode & GR_WRITE))
43912 + task->is_writable = 1;
43913 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43914 + if (unlikely(obj->mode & GR_WRITE))
43915 + task->is_writable = 1;
43916 +
43917 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43918 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43919 +#endif
43920 +
43921 + gr_set_proc_res(task);
43922 +
43923 + return;
43924 +}
43925 +
43926 +int
43927 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
43928 + const int unsafe_share)
43929 +{
43930 + struct task_struct *task = current;
43931 + struct acl_subject_label *newacl;
43932 + struct acl_object_label *obj;
43933 + __u32 retmode;
43934 +
43935 + if (unlikely(!(gr_status & GR_READY)))
43936 + return 0;
43937 +
43938 + newacl = chk_subj_label(dentry, mnt, task->role);
43939 +
43940 + task_lock(task);
43941 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
43942 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
43943 + !(task->role->roletype & GR_ROLE_GOD) &&
43944 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
43945 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
43946 + task_unlock(task);
43947 + if (unsafe_share)
43948 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
43949 + else
43950 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
43951 + return -EACCES;
43952 + }
43953 + task_unlock(task);
43954 +
43955 + obj = chk_obj_label(dentry, mnt, task->acl);
43956 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
43957 +
43958 + if (!(task->acl->mode & GR_INHERITLEARN) &&
43959 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
43960 + if (obj->nested)
43961 + task->acl = obj->nested;
43962 + else
43963 + task->acl = newacl;
43964 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
43965 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
43966 +
43967 + task->is_writable = 0;
43968 +
43969 + /* ignore additional mmap checks for processes that are writable
43970 + by the default ACL */
43971 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
43972 + if (unlikely(obj->mode & GR_WRITE))
43973 + task->is_writable = 1;
43974 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
43975 + if (unlikely(obj->mode & GR_WRITE))
43976 + task->is_writable = 1;
43977 +
43978 + gr_set_proc_res(task);
43979 +
43980 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
43981 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
43982 +#endif
43983 + return 0;
43984 +}
43985 +
43986 +/* always called with valid inodev ptr */
43987 +static void
43988 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
43989 +{
43990 + struct acl_object_label *matchpo;
43991 + struct acl_subject_label *matchps;
43992 + struct acl_subject_label *subj;
43993 + struct acl_role_label *role;
43994 + unsigned int x;
43995 +
43996 + FOR_EACH_ROLE_START(role)
43997 + FOR_EACH_SUBJECT_START(role, subj, x)
43998 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
43999 + matchpo->mode |= GR_DELETED;
44000 + FOR_EACH_SUBJECT_END(subj,x)
44001 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44002 + if (subj->inode == ino && subj->device == dev)
44003 + subj->mode |= GR_DELETED;
44004 + FOR_EACH_NESTED_SUBJECT_END(subj)
44005 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
44006 + matchps->mode |= GR_DELETED;
44007 + FOR_EACH_ROLE_END(role)
44008 +
44009 + inodev->nentry->deleted = 1;
44010 +
44011 + return;
44012 +}
44013 +
44014 +void
44015 +gr_handle_delete(const ino_t ino, const dev_t dev)
44016 +{
44017 + struct inodev_entry *inodev;
44018 +
44019 + if (unlikely(!(gr_status & GR_READY)))
44020 + return;
44021 +
44022 + write_lock(&gr_inode_lock);
44023 + inodev = lookup_inodev_entry(ino, dev);
44024 + if (inodev != NULL)
44025 + do_handle_delete(inodev, ino, dev);
44026 + write_unlock(&gr_inode_lock);
44027 +
44028 + return;
44029 +}
44030 +
44031 +static void
44032 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
44033 + const ino_t newinode, const dev_t newdevice,
44034 + struct acl_subject_label *subj)
44035 +{
44036 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44037 + struct acl_object_label *match;
44038 +
44039 + match = subj->obj_hash[index];
44040 +
44041 + while (match && (match->inode != oldinode ||
44042 + match->device != olddevice ||
44043 + !(match->mode & GR_DELETED)))
44044 + match = match->next;
44045 +
44046 + if (match && (match->inode == oldinode)
44047 + && (match->device == olddevice)
44048 + && (match->mode & GR_DELETED)) {
44049 + if (match->prev == NULL) {
44050 + subj->obj_hash[index] = match->next;
44051 + if (match->next != NULL)
44052 + match->next->prev = NULL;
44053 + } else {
44054 + match->prev->next = match->next;
44055 + if (match->next != NULL)
44056 + match->next->prev = match->prev;
44057 + }
44058 + match->prev = NULL;
44059 + match->next = NULL;
44060 + match->inode = newinode;
44061 + match->device = newdevice;
44062 + match->mode &= ~GR_DELETED;
44063 +
44064 + insert_acl_obj_label(match, subj);
44065 + }
44066 +
44067 + return;
44068 +}
44069 +
44070 +static void
44071 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44072 + const ino_t newinode, const dev_t newdevice,
44073 + struct acl_role_label *role)
44074 +{
44075 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44076 + struct acl_subject_label *match;
44077 +
44078 + match = role->subj_hash[index];
44079 +
44080 + while (match && (match->inode != oldinode ||
44081 + match->device != olddevice ||
44082 + !(match->mode & GR_DELETED)))
44083 + match = match->next;
44084 +
44085 + if (match && (match->inode == oldinode)
44086 + && (match->device == olddevice)
44087 + && (match->mode & GR_DELETED)) {
44088 + if (match->prev == NULL) {
44089 + role->subj_hash[index] = match->next;
44090 + if (match->next != NULL)
44091 + match->next->prev = NULL;
44092 + } else {
44093 + match->prev->next = match->next;
44094 + if (match->next != NULL)
44095 + match->next->prev = match->prev;
44096 + }
44097 + match->prev = NULL;
44098 + match->next = NULL;
44099 + match->inode = newinode;
44100 + match->device = newdevice;
44101 + match->mode &= ~GR_DELETED;
44102 +
44103 + insert_acl_subj_label(match, role);
44104 + }
44105 +
44106 + return;
44107 +}
44108 +
44109 +static void
44110 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44111 + const ino_t newinode, const dev_t newdevice)
44112 +{
44113 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44114 + struct inodev_entry *match;
44115 +
44116 + match = inodev_set.i_hash[index];
44117 +
44118 + while (match && (match->nentry->inode != oldinode ||
44119 + match->nentry->device != olddevice || !match->nentry->deleted))
44120 + match = match->next;
44121 +
44122 + if (match && (match->nentry->inode == oldinode)
44123 + && (match->nentry->device == olddevice) &&
44124 + match->nentry->deleted) {
44125 + if (match->prev == NULL) {
44126 + inodev_set.i_hash[index] = match->next;
44127 + if (match->next != NULL)
44128 + match->next->prev = NULL;
44129 + } else {
44130 + match->prev->next = match->next;
44131 + if (match->next != NULL)
44132 + match->next->prev = match->prev;
44133 + }
44134 + match->prev = NULL;
44135 + match->next = NULL;
44136 + match->nentry->inode = newinode;
44137 + match->nentry->device = newdevice;
44138 + match->nentry->deleted = 0;
44139 +
44140 + insert_inodev_entry(match);
44141 + }
44142 +
44143 + return;
44144 +}
44145 +
44146 +static void
44147 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44148 + const struct vfsmount *mnt)
44149 +{
44150 + struct acl_subject_label *subj;
44151 + struct acl_role_label *role;
44152 + unsigned int x;
44153 + ino_t ino = dentry->d_inode->i_ino;
44154 + dev_t dev = __get_dev(dentry);
44155 +
44156 + FOR_EACH_ROLE_START(role)
44157 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44158 +
44159 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44160 + if ((subj->inode == ino) && (subj->device == dev)) {
44161 + subj->inode = ino;
44162 + subj->device = dev;
44163 + }
44164 + FOR_EACH_NESTED_SUBJECT_END(subj)
44165 + FOR_EACH_SUBJECT_START(role, subj, x)
44166 + update_acl_obj_label(matchn->inode, matchn->device,
44167 + ino, dev, subj);
44168 + FOR_EACH_SUBJECT_END(subj,x)
44169 + FOR_EACH_ROLE_END(role)
44170 +
44171 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44172 +
44173 + return;
44174 +}
44175 +
44176 +void
44177 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44178 +{
44179 + struct name_entry *matchn;
44180 +
44181 + if (unlikely(!(gr_status & GR_READY)))
44182 + return;
44183 +
44184 + preempt_disable();
44185 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44186 +
44187 + if (unlikely((unsigned long)matchn)) {
44188 + write_lock(&gr_inode_lock);
44189 + do_handle_create(matchn, dentry, mnt);
44190 + write_unlock(&gr_inode_lock);
44191 + }
44192 + preempt_enable();
44193 +
44194 + return;
44195 +}
44196 +
44197 +void
44198 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44199 + struct dentry *old_dentry,
44200 + struct dentry *new_dentry,
44201 + struct vfsmount *mnt, const __u8 replace)
44202 +{
44203 + struct name_entry *matchn;
44204 + struct inodev_entry *inodev;
44205 + ino_t old_ino = old_dentry->d_inode->i_ino;
44206 + dev_t old_dev = __get_dev(old_dentry);
44207 +
44208 + /* vfs_rename swaps the name and parent link for old_dentry and
44209 + new_dentry
44210 + at this point, old_dentry has the new name, parent link, and inode
44211 + for the renamed file
44212 + if a file is being replaced by a rename, new_dentry has the inode
44213 + and name for the replaced file
44214 + */
44215 +
44216 + if (unlikely(!(gr_status & GR_READY)))
44217 + return;
44218 +
44219 + preempt_disable();
44220 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44221 +
44222 + /* we wouldn't have to check d_inode if it weren't for
44223 + NFS silly-renaming
44224 + */
44225 +
44226 + write_lock(&gr_inode_lock);
44227 + if (unlikely(replace && new_dentry->d_inode)) {
44228 + ino_t new_ino = new_dentry->d_inode->i_ino;
44229 + dev_t new_dev = __get_dev(new_dentry);
44230 +
44231 + inodev = lookup_inodev_entry(new_ino, new_dev);
44232 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44233 + do_handle_delete(inodev, new_ino, new_dev);
44234 + }
44235 +
44236 + inodev = lookup_inodev_entry(old_ino, old_dev);
44237 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44238 + do_handle_delete(inodev, old_ino, old_dev);
44239 +
44240 + if (unlikely((unsigned long)matchn))
44241 + do_handle_create(matchn, old_dentry, mnt);
44242 +
44243 + write_unlock(&gr_inode_lock);
44244 + preempt_enable();
44245 +
44246 + return;
44247 +}
44248 +
44249 +static int
44250 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44251 + unsigned char **sum)
44252 +{
44253 + struct acl_role_label *r;
44254 + struct role_allowed_ip *ipp;
44255 + struct role_transition *trans;
44256 + unsigned int i;
44257 + int found = 0;
44258 + u32 curr_ip = current->signal->curr_ip;
44259 +
44260 + current->signal->saved_ip = curr_ip;
44261 +
44262 + /* check transition table */
44263 +
44264 + for (trans = current->role->transitions; trans; trans = trans->next) {
44265 + if (!strcmp(rolename, trans->rolename)) {
44266 + found = 1;
44267 + break;
44268 + }
44269 + }
44270 +
44271 + if (!found)
44272 + return 0;
44273 +
44274 + /* handle special roles that do not require authentication
44275 + and check ip */
44276 +
44277 + FOR_EACH_ROLE_START(r)
44278 + if (!strcmp(rolename, r->rolename) &&
44279 + (r->roletype & GR_ROLE_SPECIAL)) {
44280 + found = 0;
44281 + if (r->allowed_ips != NULL) {
44282 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44283 + if ((ntohl(curr_ip) & ipp->netmask) ==
44284 + (ntohl(ipp->addr) & ipp->netmask))
44285 + found = 1;
44286 + }
44287 + } else
44288 + found = 2;
44289 + if (!found)
44290 + return 0;
44291 +
44292 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44293 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44294 + *salt = NULL;
44295 + *sum = NULL;
44296 + return 1;
44297 + }
44298 + }
44299 + FOR_EACH_ROLE_END(r)
44300 +
44301 + for (i = 0; i < num_sprole_pws; i++) {
44302 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44303 + *salt = acl_special_roles[i]->salt;
44304 + *sum = acl_special_roles[i]->sum;
44305 + return 1;
44306 + }
44307 + }
44308 +
44309 + return 0;
44310 +}
44311 +
44312 +static void
44313 +assign_special_role(char *rolename)
44314 +{
44315 + struct acl_object_label *obj;
44316 + struct acl_role_label *r;
44317 + struct acl_role_label *assigned = NULL;
44318 + struct task_struct *tsk;
44319 + struct file *filp;
44320 +
44321 + FOR_EACH_ROLE_START(r)
44322 + if (!strcmp(rolename, r->rolename) &&
44323 + (r->roletype & GR_ROLE_SPECIAL)) {
44324 + assigned = r;
44325 + break;
44326 + }
44327 + FOR_EACH_ROLE_END(r)
44328 +
44329 + if (!assigned)
44330 + return;
44331 +
44332 + read_lock(&tasklist_lock);
44333 + read_lock(&grsec_exec_file_lock);
44334 +
44335 + tsk = current->real_parent;
44336 + if (tsk == NULL)
44337 + goto out_unlock;
44338 +
44339 + filp = tsk->exec_file;
44340 + if (filp == NULL)
44341 + goto out_unlock;
44342 +
44343 + tsk->is_writable = 0;
44344 +
44345 + tsk->acl_sp_role = 1;
44346 + tsk->acl_role_id = ++acl_sp_role_value;
44347 + tsk->role = assigned;
44348 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44349 +
44350 + /* ignore additional mmap checks for processes that are writable
44351 + by the default ACL */
44352 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44353 + if (unlikely(obj->mode & GR_WRITE))
44354 + tsk->is_writable = 1;
44355 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44356 + if (unlikely(obj->mode & GR_WRITE))
44357 + tsk->is_writable = 1;
44358 +
44359 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44360 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44361 +#endif
44362 +
44363 +out_unlock:
44364 + read_unlock(&grsec_exec_file_lock);
44365 + read_unlock(&tasklist_lock);
44366 + return;
44367 +}
44368 +
44369 +int gr_check_secure_terminal(struct task_struct *task)
44370 +{
44371 + struct task_struct *p, *p2, *p3;
44372 + struct files_struct *files;
44373 + struct fdtable *fdt;
44374 + struct file *our_file = NULL, *file;
44375 + int i;
44376 +
44377 + if (task->signal->tty == NULL)
44378 + return 1;
44379 +
44380 + files = get_files_struct(task);
44381 + if (files != NULL) {
44382 + rcu_read_lock();
44383 + fdt = files_fdtable(files);
44384 + for (i=0; i < fdt->max_fds; i++) {
44385 + file = fcheck_files(files, i);
44386 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44387 + get_file(file);
44388 + our_file = file;
44389 + }
44390 + }
44391 + rcu_read_unlock();
44392 + put_files_struct(files);
44393 + }
44394 +
44395 + if (our_file == NULL)
44396 + return 1;
44397 +
44398 + read_lock(&tasklist_lock);
44399 + do_each_thread(p2, p) {
44400 + files = get_files_struct(p);
44401 + if (files == NULL ||
44402 + (p->signal && p->signal->tty == task->signal->tty)) {
44403 + if (files != NULL)
44404 + put_files_struct(files);
44405 + continue;
44406 + }
44407 + rcu_read_lock();
44408 + fdt = files_fdtable(files);
44409 + for (i=0; i < fdt->max_fds; i++) {
44410 + file = fcheck_files(files, i);
44411 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44412 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44413 + p3 = task;
44414 + while (p3->pid > 0) {
44415 + if (p3 == p)
44416 + break;
44417 + p3 = p3->real_parent;
44418 + }
44419 + if (p3 == p)
44420 + break;
44421 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44422 + gr_handle_alertkill(p);
44423 + rcu_read_unlock();
44424 + put_files_struct(files);
44425 + read_unlock(&tasklist_lock);
44426 + fput(our_file);
44427 + return 0;
44428 + }
44429 + }
44430 + rcu_read_unlock();
44431 + put_files_struct(files);
44432 + } while_each_thread(p2, p);
44433 + read_unlock(&tasklist_lock);
44434 +
44435 + fput(our_file);
44436 + return 1;
44437 +}
44438 +
44439 +ssize_t
44440 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44441 +{
44442 + struct gr_arg_wrapper uwrap;
44443 + unsigned char *sprole_salt = NULL;
44444 + unsigned char *sprole_sum = NULL;
44445 + int error = sizeof (struct gr_arg_wrapper);
44446 + int error2 = 0;
44447 +
44448 + mutex_lock(&gr_dev_mutex);
44449 +
44450 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44451 + error = -EPERM;
44452 + goto out;
44453 + }
44454 +
44455 + if (count != sizeof (struct gr_arg_wrapper)) {
44456 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44457 + error = -EINVAL;
44458 + goto out;
44459 + }
44460 +
44461 +
44462 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44463 + gr_auth_expires = 0;
44464 + gr_auth_attempts = 0;
44465 + }
44466 +
44467 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44468 + error = -EFAULT;
44469 + goto out;
44470 + }
44471 +
44472 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44473 + error = -EINVAL;
44474 + goto out;
44475 + }
44476 +
44477 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44478 + error = -EFAULT;
44479 + goto out;
44480 + }
44481 +
44482 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44483 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44484 + time_after(gr_auth_expires, get_seconds())) {
44485 + error = -EBUSY;
44486 + goto out;
44487 + }
44488 +
44489 + /* if non-root trying to do anything other than use a special role,
44490 + do not attempt authentication, do not count towards authentication
44491 + locking
44492 + */
44493 +
44494 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44495 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44496 + current_uid()) {
44497 + error = -EPERM;
44498 + goto out;
44499 + }
44500 +
44501 + /* ensure pw and special role name are null terminated */
44502 +
44503 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44504 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44505 +
44506 + /* Okay.
44507 + * We have our enough of the argument structure..(we have yet
44508 + * to copy_from_user the tables themselves) . Copy the tables
44509 + * only if we need them, i.e. for loading operations. */
44510 +
44511 + switch (gr_usermode->mode) {
44512 + case GR_STATUS:
44513 + if (gr_status & GR_READY) {
44514 + error = 1;
44515 + if (!gr_check_secure_terminal(current))
44516 + error = 3;
44517 + } else
44518 + error = 2;
44519 + goto out;
44520 + case GR_SHUTDOWN:
44521 + if ((gr_status & GR_READY)
44522 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44523 + pax_open_kernel();
44524 + gr_status &= ~GR_READY;
44525 + pax_close_kernel();
44526 +
44527 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44528 + free_variables();
44529 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44530 + memset(gr_system_salt, 0, GR_SALT_LEN);
44531 + memset(gr_system_sum, 0, GR_SHA_LEN);
44532 + } else if (gr_status & GR_READY) {
44533 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44534 + error = -EPERM;
44535 + } else {
44536 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44537 + error = -EAGAIN;
44538 + }
44539 + break;
44540 + case GR_ENABLE:
44541 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44542 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44543 + else {
44544 + if (gr_status & GR_READY)
44545 + error = -EAGAIN;
44546 + else
44547 + error = error2;
44548 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44549 + }
44550 + break;
44551 + case GR_RELOAD:
44552 + if (!(gr_status & GR_READY)) {
44553 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44554 + error = -EAGAIN;
44555 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44556 + preempt_disable();
44557 +
44558 + pax_open_kernel();
44559 + gr_status &= ~GR_READY;
44560 + pax_close_kernel();
44561 +
44562 + free_variables();
44563 + if (!(error2 = gracl_init(gr_usermode))) {
44564 + preempt_enable();
44565 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44566 + } else {
44567 + preempt_enable();
44568 + error = error2;
44569 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44570 + }
44571 + } else {
44572 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44573 + error = -EPERM;
44574 + }
44575 + break;
44576 + case GR_SEGVMOD:
44577 + if (unlikely(!(gr_status & GR_READY))) {
44578 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44579 + error = -EAGAIN;
44580 + break;
44581 + }
44582 +
44583 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44584 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44585 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44586 + struct acl_subject_label *segvacl;
44587 + segvacl =
44588 + lookup_acl_subj_label(gr_usermode->segv_inode,
44589 + gr_usermode->segv_device,
44590 + current->role);
44591 + if (segvacl) {
44592 + segvacl->crashes = 0;
44593 + segvacl->expires = 0;
44594 + }
44595 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44596 + gr_remove_uid(gr_usermode->segv_uid);
44597 + }
44598 + } else {
44599 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44600 + error = -EPERM;
44601 + }
44602 + break;
44603 + case GR_SPROLE:
44604 + case GR_SPROLEPAM:
44605 + if (unlikely(!(gr_status & GR_READY))) {
44606 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44607 + error = -EAGAIN;
44608 + break;
44609 + }
44610 +
44611 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44612 + current->role->expires = 0;
44613 + current->role->auth_attempts = 0;
44614 + }
44615 +
44616 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44617 + time_after(current->role->expires, get_seconds())) {
44618 + error = -EBUSY;
44619 + goto out;
44620 + }
44621 +
44622 + if (lookup_special_role_auth
44623 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44624 + && ((!sprole_salt && !sprole_sum)
44625 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44626 + char *p = "";
44627 + assign_special_role(gr_usermode->sp_role);
44628 + read_lock(&tasklist_lock);
44629 + if (current->real_parent)
44630 + p = current->real_parent->role->rolename;
44631 + read_unlock(&tasklist_lock);
44632 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44633 + p, acl_sp_role_value);
44634 + } else {
44635 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44636 + error = -EPERM;
44637 + if(!(current->role->auth_attempts++))
44638 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44639 +
44640 + goto out;
44641 + }
44642 + break;
44643 + case GR_UNSPROLE:
44644 + if (unlikely(!(gr_status & GR_READY))) {
44645 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44646 + error = -EAGAIN;
44647 + break;
44648 + }
44649 +
44650 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44651 + char *p = "";
44652 + int i = 0;
44653 +
44654 + read_lock(&tasklist_lock);
44655 + if (current->real_parent) {
44656 + p = current->real_parent->role->rolename;
44657 + i = current->real_parent->acl_role_id;
44658 + }
44659 + read_unlock(&tasklist_lock);
44660 +
44661 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44662 + gr_set_acls(1);
44663 + } else {
44664 + error = -EPERM;
44665 + goto out;
44666 + }
44667 + break;
44668 + default:
44669 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44670 + error = -EINVAL;
44671 + break;
44672 + }
44673 +
44674 + if (error != -EPERM)
44675 + goto out;
44676 +
44677 + if(!(gr_auth_attempts++))
44678 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44679 +
44680 + out:
44681 + mutex_unlock(&gr_dev_mutex);
44682 + return error;
44683 +}
44684 +
44685 +/* must be called with
44686 + rcu_read_lock();
44687 + read_lock(&tasklist_lock);
44688 + read_lock(&grsec_exec_file_lock);
44689 +*/
44690 +int gr_apply_subject_to_task(struct task_struct *task)
44691 +{
44692 + struct acl_object_label *obj;
44693 + char *tmpname;
44694 + struct acl_subject_label *tmpsubj;
44695 + struct file *filp;
44696 + struct name_entry *nmatch;
44697 +
44698 + filp = task->exec_file;
44699 + if (filp == NULL)
44700 + return 0;
44701 +
44702 + /* the following is to apply the correct subject
44703 + on binaries running when the RBAC system
44704 + is enabled, when the binaries have been
44705 + replaced or deleted since their execution
44706 + -----
44707 + when the RBAC system starts, the inode/dev
44708 + from exec_file will be one the RBAC system
44709 + is unaware of. It only knows the inode/dev
44710 + of the present file on disk, or the absence
44711 + of it.
44712 + */
44713 + preempt_disable();
44714 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44715 +
44716 + nmatch = lookup_name_entry(tmpname);
44717 + preempt_enable();
44718 + tmpsubj = NULL;
44719 + if (nmatch) {
44720 + if (nmatch->deleted)
44721 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44722 + else
44723 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44724 + if (tmpsubj != NULL)
44725 + task->acl = tmpsubj;
44726 + }
44727 + if (tmpsubj == NULL)
44728 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44729 + task->role);
44730 + if (task->acl) {
44731 + task->is_writable = 0;
44732 + /* ignore additional mmap checks for processes that are writable
44733 + by the default ACL */
44734 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44735 + if (unlikely(obj->mode & GR_WRITE))
44736 + task->is_writable = 1;
44737 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44738 + if (unlikely(obj->mode & GR_WRITE))
44739 + task->is_writable = 1;
44740 +
44741 + gr_set_proc_res(task);
44742 +
44743 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44744 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44745 +#endif
44746 + } else {
44747 + return 1;
44748 + }
44749 +
44750 + return 0;
44751 +}
44752 +
44753 +int
44754 +gr_set_acls(const int type)
44755 +{
44756 + struct task_struct *task, *task2;
44757 + struct acl_role_label *role = current->role;
44758 + __u16 acl_role_id = current->acl_role_id;
44759 + const struct cred *cred;
44760 + int ret;
44761 +
44762 + rcu_read_lock();
44763 + read_lock(&tasklist_lock);
44764 + read_lock(&grsec_exec_file_lock);
44765 + do_each_thread(task2, task) {
44766 + /* check to see if we're called from the exit handler,
44767 + if so, only replace ACLs that have inherited the admin
44768 + ACL */
44769 +
44770 + if (type && (task->role != role ||
44771 + task->acl_role_id != acl_role_id))
44772 + continue;
44773 +
44774 + task->acl_role_id = 0;
44775 + task->acl_sp_role = 0;
44776 +
44777 + if (task->exec_file) {
44778 + cred = __task_cred(task);
44779 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44780 + ret = gr_apply_subject_to_task(task);
44781 + if (ret) {
44782 + read_unlock(&grsec_exec_file_lock);
44783 + read_unlock(&tasklist_lock);
44784 + rcu_read_unlock();
44785 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44786 + return ret;
44787 + }
44788 + } else {
44789 + // it's a kernel process
44790 + task->role = kernel_role;
44791 + task->acl = kernel_role->root_label;
44792 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44793 + task->acl->mode &= ~GR_PROCFIND;
44794 +#endif
44795 + }
44796 + } while_each_thread(task2, task);
44797 + read_unlock(&grsec_exec_file_lock);
44798 + read_unlock(&tasklist_lock);
44799 + rcu_read_unlock();
44800 +
44801 + return 0;
44802 +}
44803 +
44804 +void
44805 +gr_learn_resource(const struct task_struct *task,
44806 + const int res, const unsigned long wanted, const int gt)
44807 +{
44808 + struct acl_subject_label *acl;
44809 + const struct cred *cred;
44810 +
44811 + if (unlikely((gr_status & GR_READY) &&
44812 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44813 + goto skip_reslog;
44814 +
44815 +#ifdef CONFIG_GRKERNSEC_RESLOG
44816 + gr_log_resource(task, res, wanted, gt);
44817 +#endif
44818 + skip_reslog:
44819 +
44820 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44821 + return;
44822 +
44823 + acl = task->acl;
44824 +
44825 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44826 + !(acl->resmask & (1 << (unsigned short) res))))
44827 + return;
44828 +
44829 + if (wanted >= acl->res[res].rlim_cur) {
44830 + unsigned long res_add;
44831 +
44832 + res_add = wanted;
44833 + switch (res) {
44834 + case RLIMIT_CPU:
44835 + res_add += GR_RLIM_CPU_BUMP;
44836 + break;
44837 + case RLIMIT_FSIZE:
44838 + res_add += GR_RLIM_FSIZE_BUMP;
44839 + break;
44840 + case RLIMIT_DATA:
44841 + res_add += GR_RLIM_DATA_BUMP;
44842 + break;
44843 + case RLIMIT_STACK:
44844 + res_add += GR_RLIM_STACK_BUMP;
44845 + break;
44846 + case RLIMIT_CORE:
44847 + res_add += GR_RLIM_CORE_BUMP;
44848 + break;
44849 + case RLIMIT_RSS:
44850 + res_add += GR_RLIM_RSS_BUMP;
44851 + break;
44852 + case RLIMIT_NPROC:
44853 + res_add += GR_RLIM_NPROC_BUMP;
44854 + break;
44855 + case RLIMIT_NOFILE:
44856 + res_add += GR_RLIM_NOFILE_BUMP;
44857 + break;
44858 + case RLIMIT_MEMLOCK:
44859 + res_add += GR_RLIM_MEMLOCK_BUMP;
44860 + break;
44861 + case RLIMIT_AS:
44862 + res_add += GR_RLIM_AS_BUMP;
44863 + break;
44864 + case RLIMIT_LOCKS:
44865 + res_add += GR_RLIM_LOCKS_BUMP;
44866 + break;
44867 + case RLIMIT_SIGPENDING:
44868 + res_add += GR_RLIM_SIGPENDING_BUMP;
44869 + break;
44870 + case RLIMIT_MSGQUEUE:
44871 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44872 + break;
44873 + case RLIMIT_NICE:
44874 + res_add += GR_RLIM_NICE_BUMP;
44875 + break;
44876 + case RLIMIT_RTPRIO:
44877 + res_add += GR_RLIM_RTPRIO_BUMP;
44878 + break;
44879 + case RLIMIT_RTTIME:
44880 + res_add += GR_RLIM_RTTIME_BUMP;
44881 + break;
44882 + }
44883 +
44884 + acl->res[res].rlim_cur = res_add;
44885 +
44886 + if (wanted > acl->res[res].rlim_max)
44887 + acl->res[res].rlim_max = res_add;
44888 +
44889 + /* only log the subject filename, since resource logging is supported for
44890 + single-subject learning only */
44891 + rcu_read_lock();
44892 + cred = __task_cred(task);
44893 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44894 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44895 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44896 + "", (unsigned long) res, &task->signal->saved_ip);
44897 + rcu_read_unlock();
44898 + }
44899 +
44900 + return;
44901 +}
44902 +
44903 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44904 +void
44905 +pax_set_initial_flags(struct linux_binprm *bprm)
44906 +{
44907 + struct task_struct *task = current;
44908 + struct acl_subject_label *proc;
44909 + unsigned long flags;
44910 +
44911 + if (unlikely(!(gr_status & GR_READY)))
44912 + return;
44913 +
44914 + flags = pax_get_flags(task);
44915 +
44916 + proc = task->acl;
44917 +
44918 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
44919 + flags &= ~MF_PAX_PAGEEXEC;
44920 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
44921 + flags &= ~MF_PAX_SEGMEXEC;
44922 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
44923 + flags &= ~MF_PAX_RANDMMAP;
44924 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
44925 + flags &= ~MF_PAX_EMUTRAMP;
44926 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
44927 + flags &= ~MF_PAX_MPROTECT;
44928 +
44929 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
44930 + flags |= MF_PAX_PAGEEXEC;
44931 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
44932 + flags |= MF_PAX_SEGMEXEC;
44933 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
44934 + flags |= MF_PAX_RANDMMAP;
44935 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
44936 + flags |= MF_PAX_EMUTRAMP;
44937 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
44938 + flags |= MF_PAX_MPROTECT;
44939 +
44940 + pax_set_flags(task, flags);
44941 +
44942 + return;
44943 +}
44944 +#endif
44945 +
44946 +#ifdef CONFIG_SYSCTL
44947 +/* Eric Biederman likes breaking userland ABI and every inode-based security
44948 + system to save 35kb of memory */
44949 +
44950 +/* we modify the passed in filename, but adjust it back before returning */
44951 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
44952 +{
44953 + struct name_entry *nmatch;
44954 + char *p, *lastp = NULL;
44955 + struct acl_object_label *obj = NULL, *tmp;
44956 + struct acl_subject_label *tmpsubj;
44957 + char c = '\0';
44958 +
44959 + read_lock(&gr_inode_lock);
44960 +
44961 + p = name + len - 1;
44962 + do {
44963 + nmatch = lookup_name_entry(name);
44964 + if (lastp != NULL)
44965 + *lastp = c;
44966 +
44967 + if (nmatch == NULL)
44968 + goto next_component;
44969 + tmpsubj = current->acl;
44970 + do {
44971 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
44972 + if (obj != NULL) {
44973 + tmp = obj->globbed;
44974 + while (tmp) {
44975 + if (!glob_match(tmp->filename, name)) {
44976 + obj = tmp;
44977 + goto found_obj;
44978 + }
44979 + tmp = tmp->next;
44980 + }
44981 + goto found_obj;
44982 + }
44983 + } while ((tmpsubj = tmpsubj->parent_subject));
44984 +next_component:
44985 + /* end case */
44986 + if (p == name)
44987 + break;
44988 +
44989 + while (*p != '/')
44990 + p--;
44991 + if (p == name)
44992 + lastp = p + 1;
44993 + else {
44994 + lastp = p;
44995 + p--;
44996 + }
44997 + c = *lastp;
44998 + *lastp = '\0';
44999 + } while (1);
45000 +found_obj:
45001 + read_unlock(&gr_inode_lock);
45002 + /* obj returned will always be non-null */
45003 + return obj;
45004 +}
45005 +
45006 +/* returns 0 when allowing, non-zero on error
45007 + op of 0 is used for readdir, so we don't log the names of hidden files
45008 +*/
45009 +__u32
45010 +gr_handle_sysctl(const struct ctl_table *table, const int op)
45011 +{
45012 + struct ctl_table *tmp;
45013 + const char *proc_sys = "/proc/sys";
45014 + char *path;
45015 + struct acl_object_label *obj;
45016 + unsigned short len = 0, pos = 0, depth = 0, i;
45017 + __u32 err = 0;
45018 + __u32 mode = 0;
45019 +
45020 + if (unlikely(!(gr_status & GR_READY)))
45021 + return 0;
45022 +
45023 + /* for now, ignore operations on non-sysctl entries if it's not a
45024 + readdir*/
45025 + if (table->child != NULL && op != 0)
45026 + return 0;
45027 +
45028 + mode |= GR_FIND;
45029 + /* it's only a read if it's an entry, read on dirs is for readdir */
45030 + if (op & MAY_READ)
45031 + mode |= GR_READ;
45032 + if (op & MAY_WRITE)
45033 + mode |= GR_WRITE;
45034 +
45035 + preempt_disable();
45036 +
45037 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45038 +
45039 + /* it's only a read/write if it's an actual entry, not a dir
45040 + (which are opened for readdir)
45041 + */
45042 +
45043 + /* convert the requested sysctl entry into a pathname */
45044 +
45045 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45046 + len += strlen(tmp->procname);
45047 + len++;
45048 + depth++;
45049 + }
45050 +
45051 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45052 + /* deny */
45053 + goto out;
45054 + }
45055 +
45056 + memset(path, 0, PAGE_SIZE);
45057 +
45058 + memcpy(path, proc_sys, strlen(proc_sys));
45059 +
45060 + pos += strlen(proc_sys);
45061 +
45062 + for (; depth > 0; depth--) {
45063 + path[pos] = '/';
45064 + pos++;
45065 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45066 + if (depth == i) {
45067 + memcpy(path + pos, tmp->procname,
45068 + strlen(tmp->procname));
45069 + pos += strlen(tmp->procname);
45070 + }
45071 + i++;
45072 + }
45073 + }
45074 +
45075 + obj = gr_lookup_by_name(path, pos);
45076 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45077 +
45078 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45079 + ((err & mode) != mode))) {
45080 + __u32 new_mode = mode;
45081 +
45082 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45083 +
45084 + err = 0;
45085 + gr_log_learn_sysctl(path, new_mode);
45086 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45087 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45088 + err = -ENOENT;
45089 + } else if (!(err & GR_FIND)) {
45090 + err = -ENOENT;
45091 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45092 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45093 + path, (mode & GR_READ) ? " reading" : "",
45094 + (mode & GR_WRITE) ? " writing" : "");
45095 + err = -EACCES;
45096 + } else if ((err & mode) != mode) {
45097 + err = -EACCES;
45098 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45099 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45100 + path, (mode & GR_READ) ? " reading" : "",
45101 + (mode & GR_WRITE) ? " writing" : "");
45102 + err = 0;
45103 + } else
45104 + err = 0;
45105 +
45106 + out:
45107 + preempt_enable();
45108 +
45109 + return err;
45110 +}
45111 +#endif
45112 +
45113 +int
45114 +gr_handle_proc_ptrace(struct task_struct *task)
45115 +{
45116 + struct file *filp;
45117 + struct task_struct *tmp = task;
45118 + struct task_struct *curtemp = current;
45119 + __u32 retmode;
45120 +
45121 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45122 + if (unlikely(!(gr_status & GR_READY)))
45123 + return 0;
45124 +#endif
45125 +
45126 + read_lock(&tasklist_lock);
45127 + read_lock(&grsec_exec_file_lock);
45128 + filp = task->exec_file;
45129 +
45130 + while (tmp->pid > 0) {
45131 + if (tmp == curtemp)
45132 + break;
45133 + tmp = tmp->real_parent;
45134 + }
45135 +
45136 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45137 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45138 + read_unlock(&grsec_exec_file_lock);
45139 + read_unlock(&tasklist_lock);
45140 + return 1;
45141 + }
45142 +
45143 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45144 + if (!(gr_status & GR_READY)) {
45145 + read_unlock(&grsec_exec_file_lock);
45146 + read_unlock(&tasklist_lock);
45147 + return 0;
45148 + }
45149 +#endif
45150 +
45151 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45152 + read_unlock(&grsec_exec_file_lock);
45153 + read_unlock(&tasklist_lock);
45154 +
45155 + if (retmode & GR_NOPTRACE)
45156 + return 1;
45157 +
45158 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45159 + && (current->acl != task->acl || (current->acl != current->role->root_label
45160 + && current->pid != task->pid)))
45161 + return 1;
45162 +
45163 + return 0;
45164 +}
45165 +
45166 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45167 +{
45168 + if (unlikely(!(gr_status & GR_READY)))
45169 + return;
45170 +
45171 + if (!(current->role->roletype & GR_ROLE_GOD))
45172 + return;
45173 +
45174 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45175 + p->role->rolename, gr_task_roletype_to_char(p),
45176 + p->acl->filename);
45177 +}
45178 +
45179 +int
45180 +gr_handle_ptrace(struct task_struct *task, const long request)
45181 +{
45182 + struct task_struct *tmp = task;
45183 + struct task_struct *curtemp = current;
45184 + __u32 retmode;
45185 +
45186 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45187 + if (unlikely(!(gr_status & GR_READY)))
45188 + return 0;
45189 +#endif
45190 +
45191 + read_lock(&tasklist_lock);
45192 + while (tmp->pid > 0) {
45193 + if (tmp == curtemp)
45194 + break;
45195 + tmp = tmp->real_parent;
45196 + }
45197 +
45198 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45199 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45200 + read_unlock(&tasklist_lock);
45201 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45202 + return 1;
45203 + }
45204 + read_unlock(&tasklist_lock);
45205 +
45206 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45207 + if (!(gr_status & GR_READY))
45208 + return 0;
45209 +#endif
45210 +
45211 + read_lock(&grsec_exec_file_lock);
45212 + if (unlikely(!task->exec_file)) {
45213 + read_unlock(&grsec_exec_file_lock);
45214 + return 0;
45215 + }
45216 +
45217 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45218 + read_unlock(&grsec_exec_file_lock);
45219 +
45220 + if (retmode & GR_NOPTRACE) {
45221 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45222 + return 1;
45223 + }
45224 +
45225 + if (retmode & GR_PTRACERD) {
45226 + switch (request) {
45227 + case PTRACE_POKETEXT:
45228 + case PTRACE_POKEDATA:
45229 + case PTRACE_POKEUSR:
45230 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45231 + case PTRACE_SETREGS:
45232 + case PTRACE_SETFPREGS:
45233 +#endif
45234 +#ifdef CONFIG_X86
45235 + case PTRACE_SETFPXREGS:
45236 +#endif
45237 +#ifdef CONFIG_ALTIVEC
45238 + case PTRACE_SETVRREGS:
45239 +#endif
45240 + return 1;
45241 + default:
45242 + return 0;
45243 + }
45244 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
45245 + !(current->role->roletype & GR_ROLE_GOD) &&
45246 + (current->acl != task->acl)) {
45247 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45248 + return 1;
45249 + }
45250 +
45251 + return 0;
45252 +}
45253 +
45254 +static int is_writable_mmap(const struct file *filp)
45255 +{
45256 + struct task_struct *task = current;
45257 + struct acl_object_label *obj, *obj2;
45258 +
45259 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45260 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45261 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45262 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45263 + task->role->root_label);
45264 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45265 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45266 + return 1;
45267 + }
45268 + }
45269 + return 0;
45270 +}
45271 +
45272 +int
45273 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45274 +{
45275 + __u32 mode;
45276 +
45277 + if (unlikely(!file || !(prot & PROT_EXEC)))
45278 + return 1;
45279 +
45280 + if (is_writable_mmap(file))
45281 + return 0;
45282 +
45283 + mode =
45284 + gr_search_file(file->f_path.dentry,
45285 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45286 + file->f_path.mnt);
45287 +
45288 + if (!gr_tpe_allow(file))
45289 + return 0;
45290 +
45291 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45292 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45293 + return 0;
45294 + } else if (unlikely(!(mode & GR_EXEC))) {
45295 + return 0;
45296 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45297 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45298 + return 1;
45299 + }
45300 +
45301 + return 1;
45302 +}
45303 +
45304 +int
45305 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45306 +{
45307 + __u32 mode;
45308 +
45309 + if (unlikely(!file || !(prot & PROT_EXEC)))
45310 + return 1;
45311 +
45312 + if (is_writable_mmap(file))
45313 + return 0;
45314 +
45315 + mode =
45316 + gr_search_file(file->f_path.dentry,
45317 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45318 + file->f_path.mnt);
45319 +
45320 + if (!gr_tpe_allow(file))
45321 + return 0;
45322 +
45323 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45324 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45325 + return 0;
45326 + } else if (unlikely(!(mode & GR_EXEC))) {
45327 + return 0;
45328 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45329 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45330 + return 1;
45331 + }
45332 +
45333 + return 1;
45334 +}
45335 +
45336 +void
45337 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45338 +{
45339 + unsigned long runtime;
45340 + unsigned long cputime;
45341 + unsigned int wday, cday;
45342 + __u8 whr, chr;
45343 + __u8 wmin, cmin;
45344 + __u8 wsec, csec;
45345 + struct timespec timeval;
45346 +
45347 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45348 + !(task->acl->mode & GR_PROCACCT)))
45349 + return;
45350 +
45351 + do_posix_clock_monotonic_gettime(&timeval);
45352 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45353 + wday = runtime / (3600 * 24);
45354 + runtime -= wday * (3600 * 24);
45355 + whr = runtime / 3600;
45356 + runtime -= whr * 3600;
45357 + wmin = runtime / 60;
45358 + runtime -= wmin * 60;
45359 + wsec = runtime;
45360 +
45361 + cputime = (task->utime + task->stime) / HZ;
45362 + cday = cputime / (3600 * 24);
45363 + cputime -= cday * (3600 * 24);
45364 + chr = cputime / 3600;
45365 + cputime -= chr * 3600;
45366 + cmin = cputime / 60;
45367 + cputime -= cmin * 60;
45368 + csec = cputime;
45369 +
45370 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45371 +
45372 + return;
45373 +}
45374 +
45375 +void gr_set_kernel_label(struct task_struct *task)
45376 +{
45377 + if (gr_status & GR_READY) {
45378 + task->role = kernel_role;
45379 + task->acl = kernel_role->root_label;
45380 + }
45381 + return;
45382 +}
45383 +
45384 +#ifdef CONFIG_TASKSTATS
45385 +int gr_is_taskstats_denied(int pid)
45386 +{
45387 + struct task_struct *task;
45388 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45389 + const struct cred *cred;
45390 +#endif
45391 + int ret = 0;
45392 +
45393 + /* restrict taskstats viewing to un-chrooted root users
45394 + who have the 'view' subject flag if the RBAC system is enabled
45395 + */
45396 +
45397 + rcu_read_lock();
45398 + read_lock(&tasklist_lock);
45399 + task = find_task_by_vpid(pid);
45400 + if (task) {
45401 +#ifdef CONFIG_GRKERNSEC_CHROOT
45402 + if (proc_is_chrooted(task))
45403 + ret = -EACCES;
45404 +#endif
45405 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45406 + cred = __task_cred(task);
45407 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45408 + if (cred->uid != 0)
45409 + ret = -EACCES;
45410 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45411 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45412 + ret = -EACCES;
45413 +#endif
45414 +#endif
45415 + if (gr_status & GR_READY) {
45416 + if (!(task->acl->mode & GR_VIEW))
45417 + ret = -EACCES;
45418 + }
45419 + } else
45420 + ret = -ENOENT;
45421 +
45422 + read_unlock(&tasklist_lock);
45423 + rcu_read_unlock();
45424 +
45425 + return ret;
45426 +}
45427 +#endif
45428 +
45429 +/* AUXV entries are filled via a descendant of search_binary_handler
45430 + after we've already applied the subject for the target
45431 +*/
45432 +int gr_acl_enable_at_secure(void)
45433 +{
45434 + if (unlikely(!(gr_status & GR_READY)))
45435 + return 0;
45436 +
45437 + if (current->acl->mode & GR_ATSECURE)
45438 + return 1;
45439 +
45440 + return 0;
45441 +}
45442 +
45443 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45444 +{
45445 + struct task_struct *task = current;
45446 + struct dentry *dentry = file->f_path.dentry;
45447 + struct vfsmount *mnt = file->f_path.mnt;
45448 + struct acl_object_label *obj, *tmp;
45449 + struct acl_subject_label *subj;
45450 + unsigned int bufsize;
45451 + int is_not_root;
45452 + char *path;
45453 + dev_t dev = __get_dev(dentry);
45454 +
45455 + if (unlikely(!(gr_status & GR_READY)))
45456 + return 1;
45457 +
45458 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45459 + return 1;
45460 +
45461 + /* ignore Eric Biederman */
45462 + if (IS_PRIVATE(dentry->d_inode))
45463 + return 1;
45464 +
45465 + subj = task->acl;
45466 + do {
45467 + obj = lookup_acl_obj_label(ino, dev, subj);
45468 + if (obj != NULL)
45469 + return (obj->mode & GR_FIND) ? 1 : 0;
45470 + } while ((subj = subj->parent_subject));
45471 +
45472 + /* this is purely an optimization since we're looking for an object
45473 + for the directory we're doing a readdir on
45474 + if it's possible for any globbed object to match the entry we're
45475 + filling into the directory, then the object we find here will be
45476 + an anchor point with attached globbed objects
45477 + */
45478 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45479 + if (obj->globbed == NULL)
45480 + return (obj->mode & GR_FIND) ? 1 : 0;
45481 +
45482 + is_not_root = ((obj->filename[0] == '/') &&
45483 + (obj->filename[1] == '\0')) ? 0 : 1;
45484 + bufsize = PAGE_SIZE - namelen - is_not_root;
45485 +
45486 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45487 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45488 + return 1;
45489 +
45490 + preempt_disable();
45491 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45492 + bufsize);
45493 +
45494 + bufsize = strlen(path);
45495 +
45496 + /* if base is "/", don't append an additional slash */
45497 + if (is_not_root)
45498 + *(path + bufsize) = '/';
45499 + memcpy(path + bufsize + is_not_root, name, namelen);
45500 + *(path + bufsize + namelen + is_not_root) = '\0';
45501 +
45502 + tmp = obj->globbed;
45503 + while (tmp) {
45504 + if (!glob_match(tmp->filename, path)) {
45505 + preempt_enable();
45506 + return (tmp->mode & GR_FIND) ? 1 : 0;
45507 + }
45508 + tmp = tmp->next;
45509 + }
45510 + preempt_enable();
45511 + return (obj->mode & GR_FIND) ? 1 : 0;
45512 +}
45513 +
45514 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45515 +EXPORT_SYMBOL(gr_acl_is_enabled);
45516 +#endif
45517 +EXPORT_SYMBOL(gr_learn_resource);
45518 +EXPORT_SYMBOL(gr_set_kernel_label);
45519 +#ifdef CONFIG_SECURITY
45520 +EXPORT_SYMBOL(gr_check_user_change);
45521 +EXPORT_SYMBOL(gr_check_group_change);
45522 +#endif
45523 +
45524 diff -urNp linux-2.6.39.4/grsecurity/gracl_cap.c linux-2.6.39.4/grsecurity/gracl_cap.c
45525 --- linux-2.6.39.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45526 +++ linux-2.6.39.4/grsecurity/gracl_cap.c 2011-08-05 19:44:37.000000000 -0400
45527 @@ -0,0 +1,139 @@
45528 +#include <linux/kernel.h>
45529 +#include <linux/module.h>
45530 +#include <linux/sched.h>
45531 +#include <linux/gracl.h>
45532 +#include <linux/grsecurity.h>
45533 +#include <linux/grinternal.h>
45534 +
45535 +static const char *captab_log[] = {
45536 + "CAP_CHOWN",
45537 + "CAP_DAC_OVERRIDE",
45538 + "CAP_DAC_READ_SEARCH",
45539 + "CAP_FOWNER",
45540 + "CAP_FSETID",
45541 + "CAP_KILL",
45542 + "CAP_SETGID",
45543 + "CAP_SETUID",
45544 + "CAP_SETPCAP",
45545 + "CAP_LINUX_IMMUTABLE",
45546 + "CAP_NET_BIND_SERVICE",
45547 + "CAP_NET_BROADCAST",
45548 + "CAP_NET_ADMIN",
45549 + "CAP_NET_RAW",
45550 + "CAP_IPC_LOCK",
45551 + "CAP_IPC_OWNER",
45552 + "CAP_SYS_MODULE",
45553 + "CAP_SYS_RAWIO",
45554 + "CAP_SYS_CHROOT",
45555 + "CAP_SYS_PTRACE",
45556 + "CAP_SYS_PACCT",
45557 + "CAP_SYS_ADMIN",
45558 + "CAP_SYS_BOOT",
45559 + "CAP_SYS_NICE",
45560 + "CAP_SYS_RESOURCE",
45561 + "CAP_SYS_TIME",
45562 + "CAP_SYS_TTY_CONFIG",
45563 + "CAP_MKNOD",
45564 + "CAP_LEASE",
45565 + "CAP_AUDIT_WRITE",
45566 + "CAP_AUDIT_CONTROL",
45567 + "CAP_SETFCAP",
45568 + "CAP_MAC_OVERRIDE",
45569 + "CAP_MAC_ADMIN",
45570 + "CAP_SYSLOG"
45571 +};
45572 +
45573 +EXPORT_SYMBOL(gr_is_capable);
45574 +EXPORT_SYMBOL(gr_is_capable_nolog);
45575 +
45576 +int
45577 +gr_is_capable(const int cap)
45578 +{
45579 + struct task_struct *task = current;
45580 + const struct cred *cred = current_cred();
45581 + struct acl_subject_label *curracl;
45582 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45583 + kernel_cap_t cap_audit = __cap_empty_set;
45584 +
45585 + if (!gr_acl_is_enabled())
45586 + return 1;
45587 +
45588 + curracl = task->acl;
45589 +
45590 + cap_drop = curracl->cap_lower;
45591 + cap_mask = curracl->cap_mask;
45592 + cap_audit = curracl->cap_invert_audit;
45593 +
45594 + while ((curracl = curracl->parent_subject)) {
45595 + /* if the cap isn't specified in the current computed mask but is specified in the
45596 + current level subject, and is lowered in the current level subject, then add
45597 + it to the set of dropped capabilities
45598 + otherwise, add the current level subject's mask to the current computed mask
45599 + */
45600 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45601 + cap_raise(cap_mask, cap);
45602 + if (cap_raised(curracl->cap_lower, cap))
45603 + cap_raise(cap_drop, cap);
45604 + if (cap_raised(curracl->cap_invert_audit, cap))
45605 + cap_raise(cap_audit, cap);
45606 + }
45607 + }
45608 +
45609 + if (!cap_raised(cap_drop, cap)) {
45610 + if (cap_raised(cap_audit, cap))
45611 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45612 + return 1;
45613 + }
45614 +
45615 + curracl = task->acl;
45616 +
45617 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45618 + && cap_raised(cred->cap_effective, cap)) {
45619 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45620 + task->role->roletype, cred->uid,
45621 + cred->gid, task->exec_file ?
45622 + gr_to_filename(task->exec_file->f_path.dentry,
45623 + task->exec_file->f_path.mnt) : curracl->filename,
45624 + curracl->filename, 0UL,
45625 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45626 + return 1;
45627 + }
45628 +
45629 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45630 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45631 + return 0;
45632 +}
45633 +
45634 +int
45635 +gr_is_capable_nolog(const int cap)
45636 +{
45637 + struct acl_subject_label *curracl;
45638 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45639 +
45640 + if (!gr_acl_is_enabled())
45641 + return 1;
45642 +
45643 + curracl = current->acl;
45644 +
45645 + cap_drop = curracl->cap_lower;
45646 + cap_mask = curracl->cap_mask;
45647 +
45648 + while ((curracl = curracl->parent_subject)) {
45649 + /* if the cap isn't specified in the current computed mask but is specified in the
45650 + current level subject, and is lowered in the current level subject, then add
45651 + it to the set of dropped capabilities
45652 + otherwise, add the current level subject's mask to the current computed mask
45653 + */
45654 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45655 + cap_raise(cap_mask, cap);
45656 + if (cap_raised(curracl->cap_lower, cap))
45657 + cap_raise(cap_drop, cap);
45658 + }
45659 + }
45660 +
45661 + if (!cap_raised(cap_drop, cap))
45662 + return 1;
45663 +
45664 + return 0;
45665 +}
45666 +
45667 diff -urNp linux-2.6.39.4/grsecurity/gracl_fs.c linux-2.6.39.4/grsecurity/gracl_fs.c
45668 --- linux-2.6.39.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45669 +++ linux-2.6.39.4/grsecurity/gracl_fs.c 2011-08-05 19:44:37.000000000 -0400
45670 @@ -0,0 +1,431 @@
45671 +#include <linux/kernel.h>
45672 +#include <linux/sched.h>
45673 +#include <linux/types.h>
45674 +#include <linux/fs.h>
45675 +#include <linux/file.h>
45676 +#include <linux/stat.h>
45677 +#include <linux/grsecurity.h>
45678 +#include <linux/grinternal.h>
45679 +#include <linux/gracl.h>
45680 +
45681 +__u32
45682 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45683 + const struct vfsmount * mnt)
45684 +{
45685 + __u32 mode;
45686 +
45687 + if (unlikely(!dentry->d_inode))
45688 + return GR_FIND;
45689 +
45690 + mode =
45691 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45692 +
45693 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45694 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45695 + return mode;
45696 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45697 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45698 + return 0;
45699 + } else if (unlikely(!(mode & GR_FIND)))
45700 + return 0;
45701 +
45702 + return GR_FIND;
45703 +}
45704 +
45705 +__u32
45706 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45707 + const int fmode)
45708 +{
45709 + __u32 reqmode = GR_FIND;
45710 + __u32 mode;
45711 +
45712 + if (unlikely(!dentry->d_inode))
45713 + return reqmode;
45714 +
45715 + if (unlikely(fmode & O_APPEND))
45716 + reqmode |= GR_APPEND;
45717 + else if (unlikely(fmode & FMODE_WRITE))
45718 + reqmode |= GR_WRITE;
45719 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45720 + reqmode |= GR_READ;
45721 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45722 + reqmode &= ~GR_READ;
45723 + mode =
45724 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45725 + mnt);
45726 +
45727 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45728 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45729 + reqmode & GR_READ ? " reading" : "",
45730 + reqmode & GR_WRITE ? " writing" : reqmode &
45731 + GR_APPEND ? " appending" : "");
45732 + return reqmode;
45733 + } else
45734 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45735 + {
45736 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45737 + reqmode & GR_READ ? " reading" : "",
45738 + reqmode & GR_WRITE ? " writing" : reqmode &
45739 + GR_APPEND ? " appending" : "");
45740 + return 0;
45741 + } else if (unlikely((mode & reqmode) != reqmode))
45742 + return 0;
45743 +
45744 + return reqmode;
45745 +}
45746 +
45747 +__u32
45748 +gr_acl_handle_creat(const struct dentry * dentry,
45749 + const struct dentry * p_dentry,
45750 + const struct vfsmount * p_mnt, const int fmode,
45751 + const int imode)
45752 +{
45753 + __u32 reqmode = GR_WRITE | GR_CREATE;
45754 + __u32 mode;
45755 +
45756 + if (unlikely(fmode & O_APPEND))
45757 + reqmode |= GR_APPEND;
45758 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45759 + reqmode |= GR_READ;
45760 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45761 + reqmode |= GR_SETID;
45762 +
45763 + mode =
45764 + gr_check_create(dentry, p_dentry, p_mnt,
45765 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45766 +
45767 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45768 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45769 + reqmode & GR_READ ? " reading" : "",
45770 + reqmode & GR_WRITE ? " writing" : reqmode &
45771 + GR_APPEND ? " appending" : "");
45772 + return reqmode;
45773 + } else
45774 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45775 + {
45776 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45777 + reqmode & GR_READ ? " reading" : "",
45778 + reqmode & GR_WRITE ? " writing" : reqmode &
45779 + GR_APPEND ? " appending" : "");
45780 + return 0;
45781 + } else if (unlikely((mode & reqmode) != reqmode))
45782 + return 0;
45783 +
45784 + return reqmode;
45785 +}
45786 +
45787 +__u32
45788 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45789 + const int fmode)
45790 +{
45791 + __u32 mode, reqmode = GR_FIND;
45792 +
45793 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45794 + reqmode |= GR_EXEC;
45795 + if (fmode & S_IWOTH)
45796 + reqmode |= GR_WRITE;
45797 + if (fmode & S_IROTH)
45798 + reqmode |= GR_READ;
45799 +
45800 + mode =
45801 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45802 + mnt);
45803 +
45804 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45805 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45806 + reqmode & GR_READ ? " reading" : "",
45807 + reqmode & GR_WRITE ? " writing" : "",
45808 + reqmode & GR_EXEC ? " executing" : "");
45809 + return reqmode;
45810 + } else
45811 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45812 + {
45813 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45814 + reqmode & GR_READ ? " reading" : "",
45815 + reqmode & GR_WRITE ? " writing" : "",
45816 + reqmode & GR_EXEC ? " executing" : "");
45817 + return 0;
45818 + } else if (unlikely((mode & reqmode) != reqmode))
45819 + return 0;
45820 +
45821 + return reqmode;
45822 +}
45823 +
45824 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45825 +{
45826 + __u32 mode;
45827 +
45828 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45829 +
45830 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45831 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45832 + return mode;
45833 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45834 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45835 + return 0;
45836 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45837 + return 0;
45838 +
45839 + return (reqmode);
45840 +}
45841 +
45842 +__u32
45843 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45844 +{
45845 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45846 +}
45847 +
45848 +__u32
45849 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45850 +{
45851 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45852 +}
45853 +
45854 +__u32
45855 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45856 +{
45857 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45858 +}
45859 +
45860 +__u32
45861 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45862 +{
45863 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45864 +}
45865 +
45866 +__u32
45867 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45868 + mode_t mode)
45869 +{
45870 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45871 + return 1;
45872 +
45873 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45874 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45875 + GR_FCHMOD_ACL_MSG);
45876 + } else {
45877 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45878 + }
45879 +}
45880 +
45881 +__u32
45882 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45883 + mode_t mode)
45884 +{
45885 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45886 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45887 + GR_CHMOD_ACL_MSG);
45888 + } else {
45889 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45890 + }
45891 +}
45892 +
45893 +__u32
45894 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45895 +{
45896 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45897 +}
45898 +
45899 +__u32
45900 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45901 +{
45902 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45903 +}
45904 +
45905 +__u32
45906 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45907 +{
45908 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45909 +}
45910 +
45911 +__u32
45912 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45913 +{
45914 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45915 + GR_UNIXCONNECT_ACL_MSG);
45916 +}
45917 +
45918 +/* hardlinks require at minimum create permission,
45919 + any additional privilege required is based on the
45920 + privilege of the file being linked to
45921 +*/
45922 +__u32
45923 +gr_acl_handle_link(const struct dentry * new_dentry,
45924 + const struct dentry * parent_dentry,
45925 + const struct vfsmount * parent_mnt,
45926 + const struct dentry * old_dentry,
45927 + const struct vfsmount * old_mnt, const char *to)
45928 +{
45929 + __u32 mode;
45930 + __u32 needmode = GR_CREATE | GR_LINK;
45931 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
45932 +
45933 + mode =
45934 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
45935 + old_mnt);
45936 +
45937 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
45938 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45939 + return mode;
45940 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45941 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
45942 + return 0;
45943 + } else if (unlikely((mode & needmode) != needmode))
45944 + return 0;
45945 +
45946 + return 1;
45947 +}
45948 +
45949 +__u32
45950 +gr_acl_handle_symlink(const struct dentry * new_dentry,
45951 + const struct dentry * parent_dentry,
45952 + const struct vfsmount * parent_mnt, const char *from)
45953 +{
45954 + __u32 needmode = GR_WRITE | GR_CREATE;
45955 + __u32 mode;
45956 +
45957 + mode =
45958 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45959 + GR_CREATE | GR_AUDIT_CREATE |
45960 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
45961 +
45962 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
45963 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45964 + return mode;
45965 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
45966 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
45967 + return 0;
45968 + } else if (unlikely((mode & needmode) != needmode))
45969 + return 0;
45970 +
45971 + return (GR_WRITE | GR_CREATE);
45972 +}
45973 +
45974 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
45975 +{
45976 + __u32 mode;
45977 +
45978 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45979 +
45980 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45981 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
45982 + return mode;
45983 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45984 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
45985 + return 0;
45986 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45987 + return 0;
45988 +
45989 + return (reqmode);
45990 +}
45991 +
45992 +__u32
45993 +gr_acl_handle_mknod(const struct dentry * new_dentry,
45994 + const struct dentry * parent_dentry,
45995 + const struct vfsmount * parent_mnt,
45996 + const int mode)
45997 +{
45998 + __u32 reqmode = GR_WRITE | GR_CREATE;
45999 + if (unlikely(mode & (S_ISUID | S_ISGID)))
46000 + reqmode |= GR_SETID;
46001 +
46002 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46003 + reqmode, GR_MKNOD_ACL_MSG);
46004 +}
46005 +
46006 +__u32
46007 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
46008 + const struct dentry *parent_dentry,
46009 + const struct vfsmount *parent_mnt)
46010 +{
46011 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46012 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
46013 +}
46014 +
46015 +#define RENAME_CHECK_SUCCESS(old, new) \
46016 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
46017 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
46018 +
46019 +int
46020 +gr_acl_handle_rename(struct dentry *new_dentry,
46021 + struct dentry *parent_dentry,
46022 + const struct vfsmount *parent_mnt,
46023 + struct dentry *old_dentry,
46024 + struct inode *old_parent_inode,
46025 + struct vfsmount *old_mnt, const char *newname)
46026 +{
46027 + __u32 comp1, comp2;
46028 + int error = 0;
46029 +
46030 + if (unlikely(!gr_acl_is_enabled()))
46031 + return 0;
46032 +
46033 + if (!new_dentry->d_inode) {
46034 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46035 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46036 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46037 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46038 + GR_DELETE | GR_AUDIT_DELETE |
46039 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46040 + GR_SUPPRESS, old_mnt);
46041 + } else {
46042 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46043 + GR_CREATE | GR_DELETE |
46044 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46045 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46046 + GR_SUPPRESS, parent_mnt);
46047 + comp2 =
46048 + gr_search_file(old_dentry,
46049 + GR_READ | GR_WRITE | GR_AUDIT_READ |
46050 + GR_DELETE | GR_AUDIT_DELETE |
46051 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46052 + }
46053 +
46054 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46055 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46056 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46057 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46058 + && !(comp2 & GR_SUPPRESS)) {
46059 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46060 + error = -EACCES;
46061 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46062 + error = -EACCES;
46063 +
46064 + return error;
46065 +}
46066 +
46067 +void
46068 +gr_acl_handle_exit(void)
46069 +{
46070 + u16 id;
46071 + char *rolename;
46072 + struct file *exec_file;
46073 +
46074 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46075 + !(current->role->roletype & GR_ROLE_PERSIST))) {
46076 + id = current->acl_role_id;
46077 + rolename = current->role->rolename;
46078 + gr_set_acls(1);
46079 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46080 + }
46081 +
46082 + write_lock(&grsec_exec_file_lock);
46083 + exec_file = current->exec_file;
46084 + current->exec_file = NULL;
46085 + write_unlock(&grsec_exec_file_lock);
46086 +
46087 + if (exec_file)
46088 + fput(exec_file);
46089 +}
46090 +
46091 +int
46092 +gr_acl_handle_procpidmem(const struct task_struct *task)
46093 +{
46094 + if (unlikely(!gr_acl_is_enabled()))
46095 + return 0;
46096 +
46097 + if (task != current && task->acl->mode & GR_PROTPROCFD)
46098 + return -EACCES;
46099 +
46100 + return 0;
46101 +}
46102 diff -urNp linux-2.6.39.4/grsecurity/gracl_ip.c linux-2.6.39.4/grsecurity/gracl_ip.c
46103 --- linux-2.6.39.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46104 +++ linux-2.6.39.4/grsecurity/gracl_ip.c 2011-08-05 19:44:37.000000000 -0400
46105 @@ -0,0 +1,381 @@
46106 +#include <linux/kernel.h>
46107 +#include <asm/uaccess.h>
46108 +#include <asm/errno.h>
46109 +#include <net/sock.h>
46110 +#include <linux/file.h>
46111 +#include <linux/fs.h>
46112 +#include <linux/net.h>
46113 +#include <linux/in.h>
46114 +#include <linux/skbuff.h>
46115 +#include <linux/ip.h>
46116 +#include <linux/udp.h>
46117 +#include <linux/types.h>
46118 +#include <linux/sched.h>
46119 +#include <linux/netdevice.h>
46120 +#include <linux/inetdevice.h>
46121 +#include <linux/gracl.h>
46122 +#include <linux/grsecurity.h>
46123 +#include <linux/grinternal.h>
46124 +
46125 +#define GR_BIND 0x01
46126 +#define GR_CONNECT 0x02
46127 +#define GR_INVERT 0x04
46128 +#define GR_BINDOVERRIDE 0x08
46129 +#define GR_CONNECTOVERRIDE 0x10
46130 +#define GR_SOCK_FAMILY 0x20
46131 +
46132 +static const char * gr_protocols[IPPROTO_MAX] = {
46133 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46134 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46135 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46136 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46137 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46138 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46139 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46140 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46141 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46142 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46143 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46144 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46145 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46146 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46147 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46148 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46149 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46150 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46151 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46152 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46153 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46154 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46155 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46156 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46157 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46158 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46159 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46160 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46161 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46162 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46163 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46164 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46165 + };
46166 +
46167 +static const char * gr_socktypes[SOCK_MAX] = {
46168 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46169 + "unknown:7", "unknown:8", "unknown:9", "packet"
46170 + };
46171 +
46172 +static const char * gr_sockfamilies[AF_MAX+1] = {
46173 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46174 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46175 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46176 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46177 + };
46178 +
46179 +const char *
46180 +gr_proto_to_name(unsigned char proto)
46181 +{
46182 + return gr_protocols[proto];
46183 +}
46184 +
46185 +const char *
46186 +gr_socktype_to_name(unsigned char type)
46187 +{
46188 + return gr_socktypes[type];
46189 +}
46190 +
46191 +const char *
46192 +gr_sockfamily_to_name(unsigned char family)
46193 +{
46194 + return gr_sockfamilies[family];
46195 +}
46196 +
46197 +int
46198 +gr_search_socket(const int domain, const int type, const int protocol)
46199 +{
46200 + struct acl_subject_label *curr;
46201 + const struct cred *cred = current_cred();
46202 +
46203 + if (unlikely(!gr_acl_is_enabled()))
46204 + goto exit;
46205 +
46206 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
46207 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46208 + goto exit; // let the kernel handle it
46209 +
46210 + curr = current->acl;
46211 +
46212 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46213 + /* the family is allowed, if this is PF_INET allow it only if
46214 + the extra sock type/protocol checks pass */
46215 + if (domain == PF_INET)
46216 + goto inet_check;
46217 + goto exit;
46218 + } else {
46219 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46220 + __u32 fakeip = 0;
46221 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46222 + current->role->roletype, cred->uid,
46223 + cred->gid, current->exec_file ?
46224 + gr_to_filename(current->exec_file->f_path.dentry,
46225 + current->exec_file->f_path.mnt) :
46226 + curr->filename, curr->filename,
46227 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46228 + &current->signal->saved_ip);
46229 + goto exit;
46230 + }
46231 + goto exit_fail;
46232 + }
46233 +
46234 +inet_check:
46235 + /* the rest of this checking is for IPv4 only */
46236 + if (!curr->ips)
46237 + goto exit;
46238 +
46239 + if ((curr->ip_type & (1 << type)) &&
46240 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46241 + goto exit;
46242 +
46243 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46244 + /* we don't place acls on raw sockets , and sometimes
46245 + dgram/ip sockets are opened for ioctl and not
46246 + bind/connect, so we'll fake a bind learn log */
46247 + if (type == SOCK_RAW || type == SOCK_PACKET) {
46248 + __u32 fakeip = 0;
46249 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46250 + current->role->roletype, cred->uid,
46251 + cred->gid, current->exec_file ?
46252 + gr_to_filename(current->exec_file->f_path.dentry,
46253 + current->exec_file->f_path.mnt) :
46254 + curr->filename, curr->filename,
46255 + &fakeip, 0, type,
46256 + protocol, GR_CONNECT, &current->signal->saved_ip);
46257 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46258 + __u32 fakeip = 0;
46259 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46260 + current->role->roletype, cred->uid,
46261 + cred->gid, current->exec_file ?
46262 + gr_to_filename(current->exec_file->f_path.dentry,
46263 + current->exec_file->f_path.mnt) :
46264 + curr->filename, curr->filename,
46265 + &fakeip, 0, type,
46266 + protocol, GR_BIND, &current->signal->saved_ip);
46267 + }
46268 + /* we'll log when they use connect or bind */
46269 + goto exit;
46270 + }
46271 +
46272 +exit_fail:
46273 + if (domain == PF_INET)
46274 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46275 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
46276 + else
46277 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46278 + gr_socktype_to_name(type), protocol);
46279 +
46280 + return 0;
46281 +exit:
46282 + return 1;
46283 +}
46284 +
46285 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46286 +{
46287 + if ((ip->mode & mode) &&
46288 + (ip_port >= ip->low) &&
46289 + (ip_port <= ip->high) &&
46290 + ((ntohl(ip_addr) & our_netmask) ==
46291 + (ntohl(our_addr) & our_netmask))
46292 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46293 + && (ip->type & (1 << type))) {
46294 + if (ip->mode & GR_INVERT)
46295 + return 2; // specifically denied
46296 + else
46297 + return 1; // allowed
46298 + }
46299 +
46300 + return 0; // not specifically allowed, may continue parsing
46301 +}
46302 +
46303 +static int
46304 +gr_search_connectbind(const int full_mode, struct sock *sk,
46305 + struct sockaddr_in *addr, const int type)
46306 +{
46307 + char iface[IFNAMSIZ] = {0};
46308 + struct acl_subject_label *curr;
46309 + struct acl_ip_label *ip;
46310 + struct inet_sock *isk;
46311 + struct net_device *dev;
46312 + struct in_device *idev;
46313 + unsigned long i;
46314 + int ret;
46315 + int mode = full_mode & (GR_BIND | GR_CONNECT);
46316 + __u32 ip_addr = 0;
46317 + __u32 our_addr;
46318 + __u32 our_netmask;
46319 + char *p;
46320 + __u16 ip_port = 0;
46321 + const struct cred *cred = current_cred();
46322 +
46323 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46324 + return 0;
46325 +
46326 + curr = current->acl;
46327 + isk = inet_sk(sk);
46328 +
46329 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46330 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46331 + addr->sin_addr.s_addr = curr->inaddr_any_override;
46332 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46333 + struct sockaddr_in saddr;
46334 + int err;
46335 +
46336 + saddr.sin_family = AF_INET;
46337 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46338 + saddr.sin_port = isk->inet_sport;
46339 +
46340 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46341 + if (err)
46342 + return err;
46343 +
46344 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46345 + if (err)
46346 + return err;
46347 + }
46348 +
46349 + if (!curr->ips)
46350 + return 0;
46351 +
46352 + ip_addr = addr->sin_addr.s_addr;
46353 + ip_port = ntohs(addr->sin_port);
46354 +
46355 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46356 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46357 + current->role->roletype, cred->uid,
46358 + cred->gid, current->exec_file ?
46359 + gr_to_filename(current->exec_file->f_path.dentry,
46360 + current->exec_file->f_path.mnt) :
46361 + curr->filename, curr->filename,
46362 + &ip_addr, ip_port, type,
46363 + sk->sk_protocol, mode, &current->signal->saved_ip);
46364 + return 0;
46365 + }
46366 +
46367 + for (i = 0; i < curr->ip_num; i++) {
46368 + ip = *(curr->ips + i);
46369 + if (ip->iface != NULL) {
46370 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46371 + p = strchr(iface, ':');
46372 + if (p != NULL)
46373 + *p = '\0';
46374 + dev = dev_get_by_name(sock_net(sk), iface);
46375 + if (dev == NULL)
46376 + continue;
46377 + idev = in_dev_get(dev);
46378 + if (idev == NULL) {
46379 + dev_put(dev);
46380 + continue;
46381 + }
46382 + rcu_read_lock();
46383 + for_ifa(idev) {
46384 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46385 + our_addr = ifa->ifa_address;
46386 + our_netmask = 0xffffffff;
46387 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46388 + if (ret == 1) {
46389 + rcu_read_unlock();
46390 + in_dev_put(idev);
46391 + dev_put(dev);
46392 + return 0;
46393 + } else if (ret == 2) {
46394 + rcu_read_unlock();
46395 + in_dev_put(idev);
46396 + dev_put(dev);
46397 + goto denied;
46398 + }
46399 + }
46400 + } endfor_ifa(idev);
46401 + rcu_read_unlock();
46402 + in_dev_put(idev);
46403 + dev_put(dev);
46404 + } else {
46405 + our_addr = ip->addr;
46406 + our_netmask = ip->netmask;
46407 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46408 + if (ret == 1)
46409 + return 0;
46410 + else if (ret == 2)
46411 + goto denied;
46412 + }
46413 + }
46414 +
46415 +denied:
46416 + if (mode == GR_BIND)
46417 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46418 + else if (mode == GR_CONNECT)
46419 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46420 +
46421 + return -EACCES;
46422 +}
46423 +
46424 +int
46425 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46426 +{
46427 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46428 +}
46429 +
46430 +int
46431 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46432 +{
46433 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46434 +}
46435 +
46436 +int gr_search_listen(struct socket *sock)
46437 +{
46438 + struct sock *sk = sock->sk;
46439 + struct sockaddr_in addr;
46440 +
46441 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46442 + addr.sin_port = inet_sk(sk)->inet_sport;
46443 +
46444 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46445 +}
46446 +
46447 +int gr_search_accept(struct socket *sock)
46448 +{
46449 + struct sock *sk = sock->sk;
46450 + struct sockaddr_in addr;
46451 +
46452 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46453 + addr.sin_port = inet_sk(sk)->inet_sport;
46454 +
46455 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46456 +}
46457 +
46458 +int
46459 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46460 +{
46461 + if (addr)
46462 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46463 + else {
46464 + struct sockaddr_in sin;
46465 + const struct inet_sock *inet = inet_sk(sk);
46466 +
46467 + sin.sin_addr.s_addr = inet->inet_daddr;
46468 + sin.sin_port = inet->inet_dport;
46469 +
46470 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46471 + }
46472 +}
46473 +
46474 +int
46475 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46476 +{
46477 + struct sockaddr_in sin;
46478 +
46479 + if (unlikely(skb->len < sizeof (struct udphdr)))
46480 + return 0; // skip this packet
46481 +
46482 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46483 + sin.sin_port = udp_hdr(skb)->source;
46484 +
46485 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46486 +}
46487 diff -urNp linux-2.6.39.4/grsecurity/gracl_learn.c linux-2.6.39.4/grsecurity/gracl_learn.c
46488 --- linux-2.6.39.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46489 +++ linux-2.6.39.4/grsecurity/gracl_learn.c 2011-08-05 19:44:37.000000000 -0400
46490 @@ -0,0 +1,207 @@
46491 +#include <linux/kernel.h>
46492 +#include <linux/mm.h>
46493 +#include <linux/sched.h>
46494 +#include <linux/poll.h>
46495 +#include <linux/string.h>
46496 +#include <linux/file.h>
46497 +#include <linux/types.h>
46498 +#include <linux/vmalloc.h>
46499 +#include <linux/grinternal.h>
46500 +
46501 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46502 + size_t count, loff_t *ppos);
46503 +extern int gr_acl_is_enabled(void);
46504 +
46505 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46506 +static int gr_learn_attached;
46507 +
46508 +/* use a 512k buffer */
46509 +#define LEARN_BUFFER_SIZE (512 * 1024)
46510 +
46511 +static DEFINE_SPINLOCK(gr_learn_lock);
46512 +static DEFINE_MUTEX(gr_learn_user_mutex);
46513 +
46514 +/* we need to maintain two buffers, so that the kernel context of grlearn
46515 + uses a semaphore around the userspace copying, and the other kernel contexts
46516 + use a spinlock when copying into the buffer, since they cannot sleep
46517 +*/
46518 +static char *learn_buffer;
46519 +static char *learn_buffer_user;
46520 +static int learn_buffer_len;
46521 +static int learn_buffer_user_len;
46522 +
46523 +static ssize_t
46524 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46525 +{
46526 + DECLARE_WAITQUEUE(wait, current);
46527 + ssize_t retval = 0;
46528 +
46529 + add_wait_queue(&learn_wait, &wait);
46530 + set_current_state(TASK_INTERRUPTIBLE);
46531 + do {
46532 + mutex_lock(&gr_learn_user_mutex);
46533 + spin_lock(&gr_learn_lock);
46534 + if (learn_buffer_len)
46535 + break;
46536 + spin_unlock(&gr_learn_lock);
46537 + mutex_unlock(&gr_learn_user_mutex);
46538 + if (file->f_flags & O_NONBLOCK) {
46539 + retval = -EAGAIN;
46540 + goto out;
46541 + }
46542 + if (signal_pending(current)) {
46543 + retval = -ERESTARTSYS;
46544 + goto out;
46545 + }
46546 +
46547 + schedule();
46548 + } while (1);
46549 +
46550 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46551 + learn_buffer_user_len = learn_buffer_len;
46552 + retval = learn_buffer_len;
46553 + learn_buffer_len = 0;
46554 +
46555 + spin_unlock(&gr_learn_lock);
46556 +
46557 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46558 + retval = -EFAULT;
46559 +
46560 + mutex_unlock(&gr_learn_user_mutex);
46561 +out:
46562 + set_current_state(TASK_RUNNING);
46563 + remove_wait_queue(&learn_wait, &wait);
46564 + return retval;
46565 +}
46566 +
46567 +static unsigned int
46568 +poll_learn(struct file * file, poll_table * wait)
46569 +{
46570 + poll_wait(file, &learn_wait, wait);
46571 +
46572 + if (learn_buffer_len)
46573 + return (POLLIN | POLLRDNORM);
46574 +
46575 + return 0;
46576 +}
46577 +
46578 +void
46579 +gr_clear_learn_entries(void)
46580 +{
46581 + char *tmp;
46582 +
46583 + mutex_lock(&gr_learn_user_mutex);
46584 + spin_lock(&gr_learn_lock);
46585 + tmp = learn_buffer;
46586 + learn_buffer = NULL;
46587 + spin_unlock(&gr_learn_lock);
46588 + if (tmp)
46589 + vfree(tmp);
46590 + if (learn_buffer_user != NULL) {
46591 + vfree(learn_buffer_user);
46592 + learn_buffer_user = NULL;
46593 + }
46594 + learn_buffer_len = 0;
46595 + mutex_unlock(&gr_learn_user_mutex);
46596 +
46597 + return;
46598 +}
46599 +
46600 +void
46601 +gr_add_learn_entry(const char *fmt, ...)
46602 +{
46603 + va_list args;
46604 + unsigned int len;
46605 +
46606 + if (!gr_learn_attached)
46607 + return;
46608 +
46609 + spin_lock(&gr_learn_lock);
46610 +
46611 + /* leave a gap at the end so we know when it's "full" but don't have to
46612 + compute the exact length of the string we're trying to append
46613 + */
46614 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46615 + spin_unlock(&gr_learn_lock);
46616 + wake_up_interruptible(&learn_wait);
46617 + return;
46618 + }
46619 + if (learn_buffer == NULL) {
46620 + spin_unlock(&gr_learn_lock);
46621 + return;
46622 + }
46623 +
46624 + va_start(args, fmt);
46625 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46626 + va_end(args);
46627 +
46628 + learn_buffer_len += len + 1;
46629 +
46630 + spin_unlock(&gr_learn_lock);
46631 + wake_up_interruptible(&learn_wait);
46632 +
46633 + return;
46634 +}
46635 +
46636 +static int
46637 +open_learn(struct inode *inode, struct file *file)
46638 +{
46639 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46640 + return -EBUSY;
46641 + if (file->f_mode & FMODE_READ) {
46642 + int retval = 0;
46643 + mutex_lock(&gr_learn_user_mutex);
46644 + if (learn_buffer == NULL)
46645 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46646 + if (learn_buffer_user == NULL)
46647 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46648 + if (learn_buffer == NULL) {
46649 + retval = -ENOMEM;
46650 + goto out_error;
46651 + }
46652 + if (learn_buffer_user == NULL) {
46653 + retval = -ENOMEM;
46654 + goto out_error;
46655 + }
46656 + learn_buffer_len = 0;
46657 + learn_buffer_user_len = 0;
46658 + gr_learn_attached = 1;
46659 +out_error:
46660 + mutex_unlock(&gr_learn_user_mutex);
46661 + return retval;
46662 + }
46663 + return 0;
46664 +}
46665 +
46666 +static int
46667 +close_learn(struct inode *inode, struct file *file)
46668 +{
46669 + if (file->f_mode & FMODE_READ) {
46670 + char *tmp = NULL;
46671 + mutex_lock(&gr_learn_user_mutex);
46672 + spin_lock(&gr_learn_lock);
46673 + tmp = learn_buffer;
46674 + learn_buffer = NULL;
46675 + spin_unlock(&gr_learn_lock);
46676 + if (tmp)
46677 + vfree(tmp);
46678 + if (learn_buffer_user != NULL) {
46679 + vfree(learn_buffer_user);
46680 + learn_buffer_user = NULL;
46681 + }
46682 + learn_buffer_len = 0;
46683 + learn_buffer_user_len = 0;
46684 + gr_learn_attached = 0;
46685 + mutex_unlock(&gr_learn_user_mutex);
46686 + }
46687 +
46688 + return 0;
46689 +}
46690 +
46691 +const struct file_operations grsec_fops = {
46692 + .read = read_learn,
46693 + .write = write_grsec_handler,
46694 + .open = open_learn,
46695 + .release = close_learn,
46696 + .poll = poll_learn,
46697 +};
46698 diff -urNp linux-2.6.39.4/grsecurity/gracl_res.c linux-2.6.39.4/grsecurity/gracl_res.c
46699 --- linux-2.6.39.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46700 +++ linux-2.6.39.4/grsecurity/gracl_res.c 2011-08-05 19:44:37.000000000 -0400
46701 @@ -0,0 +1,68 @@
46702 +#include <linux/kernel.h>
46703 +#include <linux/sched.h>
46704 +#include <linux/gracl.h>
46705 +#include <linux/grinternal.h>
46706 +
46707 +static const char *restab_log[] = {
46708 + [RLIMIT_CPU] = "RLIMIT_CPU",
46709 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46710 + [RLIMIT_DATA] = "RLIMIT_DATA",
46711 + [RLIMIT_STACK] = "RLIMIT_STACK",
46712 + [RLIMIT_CORE] = "RLIMIT_CORE",
46713 + [RLIMIT_RSS] = "RLIMIT_RSS",
46714 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46715 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46716 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46717 + [RLIMIT_AS] = "RLIMIT_AS",
46718 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46719 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46720 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46721 + [RLIMIT_NICE] = "RLIMIT_NICE",
46722 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46723 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46724 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46725 +};
46726 +
46727 +void
46728 +gr_log_resource(const struct task_struct *task,
46729 + const int res, const unsigned long wanted, const int gt)
46730 +{
46731 + const struct cred *cred;
46732 + unsigned long rlim;
46733 +
46734 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46735 + return;
46736 +
46737 + // not yet supported resource
46738 + if (unlikely(!restab_log[res]))
46739 + return;
46740 +
46741 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46742 + rlim = task_rlimit_max(task, res);
46743 + else
46744 + rlim = task_rlimit(task, res);
46745 +
46746 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46747 + return;
46748 +
46749 + rcu_read_lock();
46750 + cred = __task_cred(task);
46751 +
46752 + if (res == RLIMIT_NPROC &&
46753 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46754 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46755 + goto out_rcu_unlock;
46756 + else if (res == RLIMIT_MEMLOCK &&
46757 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46758 + goto out_rcu_unlock;
46759 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46760 + goto out_rcu_unlock;
46761 + rcu_read_unlock();
46762 +
46763 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46764 +
46765 + return;
46766 +out_rcu_unlock:
46767 + rcu_read_unlock();
46768 + return;
46769 +}
46770 diff -urNp linux-2.6.39.4/grsecurity/gracl_segv.c linux-2.6.39.4/grsecurity/gracl_segv.c
46771 --- linux-2.6.39.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46772 +++ linux-2.6.39.4/grsecurity/gracl_segv.c 2011-08-05 19:44:37.000000000 -0400
46773 @@ -0,0 +1,299 @@
46774 +#include <linux/kernel.h>
46775 +#include <linux/mm.h>
46776 +#include <asm/uaccess.h>
46777 +#include <asm/errno.h>
46778 +#include <asm/mman.h>
46779 +#include <net/sock.h>
46780 +#include <linux/file.h>
46781 +#include <linux/fs.h>
46782 +#include <linux/net.h>
46783 +#include <linux/in.h>
46784 +#include <linux/slab.h>
46785 +#include <linux/types.h>
46786 +#include <linux/sched.h>
46787 +#include <linux/timer.h>
46788 +#include <linux/gracl.h>
46789 +#include <linux/grsecurity.h>
46790 +#include <linux/grinternal.h>
46791 +
46792 +static struct crash_uid *uid_set;
46793 +static unsigned short uid_used;
46794 +static DEFINE_SPINLOCK(gr_uid_lock);
46795 +extern rwlock_t gr_inode_lock;
46796 +extern struct acl_subject_label *
46797 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46798 + struct acl_role_label *role);
46799 +
46800 +#ifdef CONFIG_BTRFS_FS
46801 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46802 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46803 +#endif
46804 +
46805 +static inline dev_t __get_dev(const struct dentry *dentry)
46806 +{
46807 +#ifdef CONFIG_BTRFS_FS
46808 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46809 + return get_btrfs_dev_from_inode(dentry->d_inode);
46810 + else
46811 +#endif
46812 + return dentry->d_inode->i_sb->s_dev;
46813 +}
46814 +
46815 +int
46816 +gr_init_uidset(void)
46817 +{
46818 + uid_set =
46819 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46820 + uid_used = 0;
46821 +
46822 + return uid_set ? 1 : 0;
46823 +}
46824 +
46825 +void
46826 +gr_free_uidset(void)
46827 +{
46828 + if (uid_set)
46829 + kfree(uid_set);
46830 +
46831 + return;
46832 +}
46833 +
46834 +int
46835 +gr_find_uid(const uid_t uid)
46836 +{
46837 + struct crash_uid *tmp = uid_set;
46838 + uid_t buid;
46839 + int low = 0, high = uid_used - 1, mid;
46840 +
46841 + while (high >= low) {
46842 + mid = (low + high) >> 1;
46843 + buid = tmp[mid].uid;
46844 + if (buid == uid)
46845 + return mid;
46846 + if (buid > uid)
46847 + high = mid - 1;
46848 + if (buid < uid)
46849 + low = mid + 1;
46850 + }
46851 +
46852 + return -1;
46853 +}
46854 +
46855 +static __inline__ void
46856 +gr_insertsort(void)
46857 +{
46858 + unsigned short i, j;
46859 + struct crash_uid index;
46860 +
46861 + for (i = 1; i < uid_used; i++) {
46862 + index = uid_set[i];
46863 + j = i;
46864 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46865 + uid_set[j] = uid_set[j - 1];
46866 + j--;
46867 + }
46868 + uid_set[j] = index;
46869 + }
46870 +
46871 + return;
46872 +}
46873 +
46874 +static __inline__ void
46875 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46876 +{
46877 + int loc;
46878 +
46879 + if (uid_used == GR_UIDTABLE_MAX)
46880 + return;
46881 +
46882 + loc = gr_find_uid(uid);
46883 +
46884 + if (loc >= 0) {
46885 + uid_set[loc].expires = expires;
46886 + return;
46887 + }
46888 +
46889 + uid_set[uid_used].uid = uid;
46890 + uid_set[uid_used].expires = expires;
46891 + uid_used++;
46892 +
46893 + gr_insertsort();
46894 +
46895 + return;
46896 +}
46897 +
46898 +void
46899 +gr_remove_uid(const unsigned short loc)
46900 +{
46901 + unsigned short i;
46902 +
46903 + for (i = loc + 1; i < uid_used; i++)
46904 + uid_set[i - 1] = uid_set[i];
46905 +
46906 + uid_used--;
46907 +
46908 + return;
46909 +}
46910 +
46911 +int
46912 +gr_check_crash_uid(const uid_t uid)
46913 +{
46914 + int loc;
46915 + int ret = 0;
46916 +
46917 + if (unlikely(!gr_acl_is_enabled()))
46918 + return 0;
46919 +
46920 + spin_lock(&gr_uid_lock);
46921 + loc = gr_find_uid(uid);
46922 +
46923 + if (loc < 0)
46924 + goto out_unlock;
46925 +
46926 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
46927 + gr_remove_uid(loc);
46928 + else
46929 + ret = 1;
46930 +
46931 +out_unlock:
46932 + spin_unlock(&gr_uid_lock);
46933 + return ret;
46934 +}
46935 +
46936 +static __inline__ int
46937 +proc_is_setxid(const struct cred *cred)
46938 +{
46939 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
46940 + cred->uid != cred->fsuid)
46941 + return 1;
46942 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
46943 + cred->gid != cred->fsgid)
46944 + return 1;
46945 +
46946 + return 0;
46947 +}
46948 +
46949 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
46950 +
46951 +void
46952 +gr_handle_crash(struct task_struct *task, const int sig)
46953 +{
46954 + struct acl_subject_label *curr;
46955 + struct acl_subject_label *curr2;
46956 + struct task_struct *tsk, *tsk2;
46957 + const struct cred *cred;
46958 + const struct cred *cred2;
46959 +
46960 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
46961 + return;
46962 +
46963 + if (unlikely(!gr_acl_is_enabled()))
46964 + return;
46965 +
46966 + curr = task->acl;
46967 +
46968 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
46969 + return;
46970 +
46971 + if (time_before_eq(curr->expires, get_seconds())) {
46972 + curr->expires = 0;
46973 + curr->crashes = 0;
46974 + }
46975 +
46976 + curr->crashes++;
46977 +
46978 + if (!curr->expires)
46979 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
46980 +
46981 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
46982 + time_after(curr->expires, get_seconds())) {
46983 + rcu_read_lock();
46984 + cred = __task_cred(task);
46985 + if (cred->uid && proc_is_setxid(cred)) {
46986 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
46987 + spin_lock(&gr_uid_lock);
46988 + gr_insert_uid(cred->uid, curr->expires);
46989 + spin_unlock(&gr_uid_lock);
46990 + curr->expires = 0;
46991 + curr->crashes = 0;
46992 + read_lock(&tasklist_lock);
46993 + do_each_thread(tsk2, tsk) {
46994 + cred2 = __task_cred(tsk);
46995 + if (tsk != task && cred2->uid == cred->uid)
46996 + gr_fake_force_sig(SIGKILL, tsk);
46997 + } while_each_thread(tsk2, tsk);
46998 + read_unlock(&tasklist_lock);
46999 + } else {
47000 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47001 + read_lock(&tasklist_lock);
47002 + do_each_thread(tsk2, tsk) {
47003 + if (likely(tsk != task)) {
47004 + curr2 = tsk->acl;
47005 +
47006 + if (curr2->device == curr->device &&
47007 + curr2->inode == curr->inode)
47008 + gr_fake_force_sig(SIGKILL, tsk);
47009 + }
47010 + } while_each_thread(tsk2, tsk);
47011 + read_unlock(&tasklist_lock);
47012 + }
47013 + rcu_read_unlock();
47014 + }
47015 +
47016 + return;
47017 +}
47018 +
47019 +int
47020 +gr_check_crash_exec(const struct file *filp)
47021 +{
47022 + struct acl_subject_label *curr;
47023 +
47024 + if (unlikely(!gr_acl_is_enabled()))
47025 + return 0;
47026 +
47027 + read_lock(&gr_inode_lock);
47028 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
47029 + __get_dev(filp->f_path.dentry),
47030 + current->role);
47031 + read_unlock(&gr_inode_lock);
47032 +
47033 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
47034 + (!curr->crashes && !curr->expires))
47035 + return 0;
47036 +
47037 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47038 + time_after(curr->expires, get_seconds()))
47039 + return 1;
47040 + else if (time_before_eq(curr->expires, get_seconds())) {
47041 + curr->crashes = 0;
47042 + curr->expires = 0;
47043 + }
47044 +
47045 + return 0;
47046 +}
47047 +
47048 +void
47049 +gr_handle_alertkill(struct task_struct *task)
47050 +{
47051 + struct acl_subject_label *curracl;
47052 + __u32 curr_ip;
47053 + struct task_struct *p, *p2;
47054 +
47055 + if (unlikely(!gr_acl_is_enabled()))
47056 + return;
47057 +
47058 + curracl = task->acl;
47059 + curr_ip = task->signal->curr_ip;
47060 +
47061 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47062 + read_lock(&tasklist_lock);
47063 + do_each_thread(p2, p) {
47064 + if (p->signal->curr_ip == curr_ip)
47065 + gr_fake_force_sig(SIGKILL, p);
47066 + } while_each_thread(p2, p);
47067 + read_unlock(&tasklist_lock);
47068 + } else if (curracl->mode & GR_KILLPROC)
47069 + gr_fake_force_sig(SIGKILL, task);
47070 +
47071 + return;
47072 +}
47073 diff -urNp linux-2.6.39.4/grsecurity/gracl_shm.c linux-2.6.39.4/grsecurity/gracl_shm.c
47074 --- linux-2.6.39.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47075 +++ linux-2.6.39.4/grsecurity/gracl_shm.c 2011-08-05 19:44:37.000000000 -0400
47076 @@ -0,0 +1,40 @@
47077 +#include <linux/kernel.h>
47078 +#include <linux/mm.h>
47079 +#include <linux/sched.h>
47080 +#include <linux/file.h>
47081 +#include <linux/ipc.h>
47082 +#include <linux/gracl.h>
47083 +#include <linux/grsecurity.h>
47084 +#include <linux/grinternal.h>
47085 +
47086 +int
47087 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47088 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47089 +{
47090 + struct task_struct *task;
47091 +
47092 + if (!gr_acl_is_enabled())
47093 + return 1;
47094 +
47095 + rcu_read_lock();
47096 + read_lock(&tasklist_lock);
47097 +
47098 + task = find_task_by_vpid(shm_cprid);
47099 +
47100 + if (unlikely(!task))
47101 + task = find_task_by_vpid(shm_lapid);
47102 +
47103 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47104 + (task->pid == shm_lapid)) &&
47105 + (task->acl->mode & GR_PROTSHM) &&
47106 + (task->acl != current->acl))) {
47107 + read_unlock(&tasklist_lock);
47108 + rcu_read_unlock();
47109 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47110 + return 0;
47111 + }
47112 + read_unlock(&tasklist_lock);
47113 + rcu_read_unlock();
47114 +
47115 + return 1;
47116 +}
47117 diff -urNp linux-2.6.39.4/grsecurity/grsec_chdir.c linux-2.6.39.4/grsecurity/grsec_chdir.c
47118 --- linux-2.6.39.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47119 +++ linux-2.6.39.4/grsecurity/grsec_chdir.c 2011-08-05 19:44:37.000000000 -0400
47120 @@ -0,0 +1,19 @@
47121 +#include <linux/kernel.h>
47122 +#include <linux/sched.h>
47123 +#include <linux/fs.h>
47124 +#include <linux/file.h>
47125 +#include <linux/grsecurity.h>
47126 +#include <linux/grinternal.h>
47127 +
47128 +void
47129 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47130 +{
47131 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47132 + if ((grsec_enable_chdir && grsec_enable_group &&
47133 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47134 + !grsec_enable_group)) {
47135 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47136 + }
47137 +#endif
47138 + return;
47139 +}
47140 diff -urNp linux-2.6.39.4/grsecurity/grsec_chroot.c linux-2.6.39.4/grsecurity/grsec_chroot.c
47141 --- linux-2.6.39.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47142 +++ linux-2.6.39.4/grsecurity/grsec_chroot.c 2011-08-05 19:44:37.000000000 -0400
47143 @@ -0,0 +1,349 @@
47144 +#include <linux/kernel.h>
47145 +#include <linux/module.h>
47146 +#include <linux/sched.h>
47147 +#include <linux/file.h>
47148 +#include <linux/fs.h>
47149 +#include <linux/mount.h>
47150 +#include <linux/types.h>
47151 +#include <linux/pid_namespace.h>
47152 +#include <linux/grsecurity.h>
47153 +#include <linux/grinternal.h>
47154 +
47155 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47156 +{
47157 +#ifdef CONFIG_GRKERNSEC
47158 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47159 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47160 + task->gr_is_chrooted = 1;
47161 + else
47162 + task->gr_is_chrooted = 0;
47163 +
47164 + task->gr_chroot_dentry = path->dentry;
47165 +#endif
47166 + return;
47167 +}
47168 +
47169 +void gr_clear_chroot_entries(struct task_struct *task)
47170 +{
47171 +#ifdef CONFIG_GRKERNSEC
47172 + task->gr_is_chrooted = 0;
47173 + task->gr_chroot_dentry = NULL;
47174 +#endif
47175 + return;
47176 +}
47177 +
47178 +int
47179 +gr_handle_chroot_unix(const pid_t pid)
47180 +{
47181 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47182 + struct task_struct *p;
47183 +
47184 + if (unlikely(!grsec_enable_chroot_unix))
47185 + return 1;
47186 +
47187 + if (likely(!proc_is_chrooted(current)))
47188 + return 1;
47189 +
47190 + rcu_read_lock();
47191 + read_lock(&tasklist_lock);
47192 + p = find_task_by_vpid_unrestricted(pid);
47193 + if (unlikely(p && !have_same_root(current, p))) {
47194 + read_unlock(&tasklist_lock);
47195 + rcu_read_unlock();
47196 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47197 + return 0;
47198 + }
47199 + read_unlock(&tasklist_lock);
47200 + rcu_read_unlock();
47201 +#endif
47202 + return 1;
47203 +}
47204 +
47205 +int
47206 +gr_handle_chroot_nice(void)
47207 +{
47208 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47209 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47210 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47211 + return -EPERM;
47212 + }
47213 +#endif
47214 + return 0;
47215 +}
47216 +
47217 +int
47218 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47219 +{
47220 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47221 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47222 + && proc_is_chrooted(current)) {
47223 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47224 + return -EACCES;
47225 + }
47226 +#endif
47227 + return 0;
47228 +}
47229 +
47230 +int
47231 +gr_handle_chroot_rawio(const struct inode *inode)
47232 +{
47233 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47234 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47235 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47236 + return 1;
47237 +#endif
47238 + return 0;
47239 +}
47240 +
47241 +int
47242 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47243 +{
47244 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47245 + struct task_struct *p;
47246 + int ret = 0;
47247 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47248 + return ret;
47249 +
47250 + read_lock(&tasklist_lock);
47251 + do_each_pid_task(pid, type, p) {
47252 + if (!have_same_root(current, p)) {
47253 + ret = 1;
47254 + goto out;
47255 + }
47256 + } while_each_pid_task(pid, type, p);
47257 +out:
47258 + read_unlock(&tasklist_lock);
47259 + return ret;
47260 +#endif
47261 + return 0;
47262 +}
47263 +
47264 +int
47265 +gr_pid_is_chrooted(struct task_struct *p)
47266 +{
47267 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47268 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47269 + return 0;
47270 +
47271 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47272 + !have_same_root(current, p)) {
47273 + return 1;
47274 + }
47275 +#endif
47276 + return 0;
47277 +}
47278 +
47279 +EXPORT_SYMBOL(gr_pid_is_chrooted);
47280 +
47281 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47282 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47283 +{
47284 + struct path path, currentroot;
47285 + int ret = 0;
47286 +
47287 + path.dentry = (struct dentry *)u_dentry;
47288 + path.mnt = (struct vfsmount *)u_mnt;
47289 + get_fs_root(current->fs, &currentroot);
47290 + if (path_is_under(&path, &currentroot))
47291 + ret = 1;
47292 + path_put(&currentroot);
47293 +
47294 + return ret;
47295 +}
47296 +#endif
47297 +
47298 +int
47299 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47300 +{
47301 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47302 + if (!grsec_enable_chroot_fchdir)
47303 + return 1;
47304 +
47305 + if (!proc_is_chrooted(current))
47306 + return 1;
47307 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47308 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47309 + return 0;
47310 + }
47311 +#endif
47312 + return 1;
47313 +}
47314 +
47315 +int
47316 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47317 + const time_t shm_createtime)
47318 +{
47319 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47320 + struct task_struct *p;
47321 + time_t starttime;
47322 +
47323 + if (unlikely(!grsec_enable_chroot_shmat))
47324 + return 1;
47325 +
47326 + if (likely(!proc_is_chrooted(current)))
47327 + return 1;
47328 +
47329 + rcu_read_lock();
47330 + read_lock(&tasklist_lock);
47331 +
47332 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47333 + starttime = p->start_time.tv_sec;
47334 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47335 + if (have_same_root(current, p)) {
47336 + goto allow;
47337 + } else {
47338 + read_unlock(&tasklist_lock);
47339 + rcu_read_unlock();
47340 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47341 + return 0;
47342 + }
47343 + }
47344 + /* creator exited, pid reuse, fall through to next check */
47345 + }
47346 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47347 + if (unlikely(!have_same_root(current, p))) {
47348 + read_unlock(&tasklist_lock);
47349 + rcu_read_unlock();
47350 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47351 + return 0;
47352 + }
47353 + }
47354 +
47355 +allow:
47356 + read_unlock(&tasklist_lock);
47357 + rcu_read_unlock();
47358 +#endif
47359 + return 1;
47360 +}
47361 +
47362 +void
47363 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47364 +{
47365 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47366 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47367 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47368 +#endif
47369 + return;
47370 +}
47371 +
47372 +int
47373 +gr_handle_chroot_mknod(const struct dentry *dentry,
47374 + const struct vfsmount *mnt, const int mode)
47375 +{
47376 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47377 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47378 + proc_is_chrooted(current)) {
47379 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47380 + return -EPERM;
47381 + }
47382 +#endif
47383 + return 0;
47384 +}
47385 +
47386 +int
47387 +gr_handle_chroot_mount(const struct dentry *dentry,
47388 + const struct vfsmount *mnt, const char *dev_name)
47389 +{
47390 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47391 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47392 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47393 + return -EPERM;
47394 + }
47395 +#endif
47396 + return 0;
47397 +}
47398 +
47399 +int
47400 +gr_handle_chroot_pivot(void)
47401 +{
47402 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47403 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47404 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47405 + return -EPERM;
47406 + }
47407 +#endif
47408 + return 0;
47409 +}
47410 +
47411 +int
47412 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47413 +{
47414 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47415 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47416 + !gr_is_outside_chroot(dentry, mnt)) {
47417 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47418 + return -EPERM;
47419 + }
47420 +#endif
47421 + return 0;
47422 +}
47423 +
47424 +int
47425 +gr_handle_chroot_caps(struct path *path)
47426 +{
47427 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47428 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47429 + (init_task.fs->root.dentry != path->dentry) &&
47430 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47431 +
47432 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47433 + const struct cred *old = current_cred();
47434 + struct cred *new = prepare_creds();
47435 + if (new == NULL)
47436 + return 1;
47437 +
47438 + new->cap_permitted = cap_drop(old->cap_permitted,
47439 + chroot_caps);
47440 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47441 + chroot_caps);
47442 + new->cap_effective = cap_drop(old->cap_effective,
47443 + chroot_caps);
47444 +
47445 + commit_creds(new);
47446 +
47447 + return 0;
47448 + }
47449 +#endif
47450 + return 0;
47451 +}
47452 +
47453 +int
47454 +gr_handle_chroot_sysctl(const int op)
47455 +{
47456 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47457 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47458 + proc_is_chrooted(current))
47459 + return -EACCES;
47460 +#endif
47461 + return 0;
47462 +}
47463 +
47464 +void
47465 +gr_handle_chroot_chdir(struct path *path)
47466 +{
47467 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47468 + if (grsec_enable_chroot_chdir)
47469 + set_fs_pwd(current->fs, path);
47470 +#endif
47471 + return;
47472 +}
47473 +
47474 +int
47475 +gr_handle_chroot_chmod(const struct dentry *dentry,
47476 + const struct vfsmount *mnt, const int mode)
47477 +{
47478 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47479 + /* allow chmod +s on directories, but not files */
47480 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47481 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47482 + proc_is_chrooted(current)) {
47483 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47484 + return -EPERM;
47485 + }
47486 +#endif
47487 + return 0;
47488 +}
47489 +
47490 +#ifdef CONFIG_SECURITY
47491 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47492 +#endif
47493 diff -urNp linux-2.6.39.4/grsecurity/grsec_disabled.c linux-2.6.39.4/grsecurity/grsec_disabled.c
47494 --- linux-2.6.39.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47495 +++ linux-2.6.39.4/grsecurity/grsec_disabled.c 2011-08-05 19:44:37.000000000 -0400
47496 @@ -0,0 +1,447 @@
47497 +#include <linux/kernel.h>
47498 +#include <linux/module.h>
47499 +#include <linux/sched.h>
47500 +#include <linux/file.h>
47501 +#include <linux/fs.h>
47502 +#include <linux/kdev_t.h>
47503 +#include <linux/net.h>
47504 +#include <linux/in.h>
47505 +#include <linux/ip.h>
47506 +#include <linux/skbuff.h>
47507 +#include <linux/sysctl.h>
47508 +
47509 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47510 +void
47511 +pax_set_initial_flags(struct linux_binprm *bprm)
47512 +{
47513 + return;
47514 +}
47515 +#endif
47516 +
47517 +#ifdef CONFIG_SYSCTL
47518 +__u32
47519 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47520 +{
47521 + return 0;
47522 +}
47523 +#endif
47524 +
47525 +#ifdef CONFIG_TASKSTATS
47526 +int gr_is_taskstats_denied(int pid)
47527 +{
47528 + return 0;
47529 +}
47530 +#endif
47531 +
47532 +int
47533 +gr_acl_is_enabled(void)
47534 +{
47535 + return 0;
47536 +}
47537 +
47538 +int
47539 +gr_handle_rawio(const struct inode *inode)
47540 +{
47541 + return 0;
47542 +}
47543 +
47544 +void
47545 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47546 +{
47547 + return;
47548 +}
47549 +
47550 +int
47551 +gr_handle_ptrace(struct task_struct *task, const long request)
47552 +{
47553 + return 0;
47554 +}
47555 +
47556 +int
47557 +gr_handle_proc_ptrace(struct task_struct *task)
47558 +{
47559 + return 0;
47560 +}
47561 +
47562 +void
47563 +gr_learn_resource(const struct task_struct *task,
47564 + const int res, const unsigned long wanted, const int gt)
47565 +{
47566 + return;
47567 +}
47568 +
47569 +int
47570 +gr_set_acls(const int type)
47571 +{
47572 + return 0;
47573 +}
47574 +
47575 +int
47576 +gr_check_hidden_task(const struct task_struct *tsk)
47577 +{
47578 + return 0;
47579 +}
47580 +
47581 +int
47582 +gr_check_protected_task(const struct task_struct *task)
47583 +{
47584 + return 0;
47585 +}
47586 +
47587 +int
47588 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47589 +{
47590 + return 0;
47591 +}
47592 +
47593 +void
47594 +gr_copy_label(struct task_struct *tsk)
47595 +{
47596 + return;
47597 +}
47598 +
47599 +void
47600 +gr_set_pax_flags(struct task_struct *task)
47601 +{
47602 + return;
47603 +}
47604 +
47605 +int
47606 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47607 + const int unsafe_share)
47608 +{
47609 + return 0;
47610 +}
47611 +
47612 +void
47613 +gr_handle_delete(const ino_t ino, const dev_t dev)
47614 +{
47615 + return;
47616 +}
47617 +
47618 +void
47619 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47620 +{
47621 + return;
47622 +}
47623 +
47624 +void
47625 +gr_handle_crash(struct task_struct *task, const int sig)
47626 +{
47627 + return;
47628 +}
47629 +
47630 +int
47631 +gr_check_crash_exec(const struct file *filp)
47632 +{
47633 + return 0;
47634 +}
47635 +
47636 +int
47637 +gr_check_crash_uid(const uid_t uid)
47638 +{
47639 + return 0;
47640 +}
47641 +
47642 +void
47643 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47644 + struct dentry *old_dentry,
47645 + struct dentry *new_dentry,
47646 + struct vfsmount *mnt, const __u8 replace)
47647 +{
47648 + return;
47649 +}
47650 +
47651 +int
47652 +gr_search_socket(const int family, const int type, const int protocol)
47653 +{
47654 + return 1;
47655 +}
47656 +
47657 +int
47658 +gr_search_connectbind(const int mode, const struct socket *sock,
47659 + const struct sockaddr_in *addr)
47660 +{
47661 + return 0;
47662 +}
47663 +
47664 +int
47665 +gr_is_capable(const int cap)
47666 +{
47667 + return 1;
47668 +}
47669 +
47670 +int
47671 +gr_is_capable_nolog(const int cap)
47672 +{
47673 + return 1;
47674 +}
47675 +
47676 +void
47677 +gr_handle_alertkill(struct task_struct *task)
47678 +{
47679 + return;
47680 +}
47681 +
47682 +__u32
47683 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47684 +{
47685 + return 1;
47686 +}
47687 +
47688 +__u32
47689 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47690 + const struct vfsmount * mnt)
47691 +{
47692 + return 1;
47693 +}
47694 +
47695 +__u32
47696 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47697 + const int fmode)
47698 +{
47699 + return 1;
47700 +}
47701 +
47702 +__u32
47703 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47704 +{
47705 + return 1;
47706 +}
47707 +
47708 +__u32
47709 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47710 +{
47711 + return 1;
47712 +}
47713 +
47714 +int
47715 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47716 + unsigned int *vm_flags)
47717 +{
47718 + return 1;
47719 +}
47720 +
47721 +__u32
47722 +gr_acl_handle_truncate(const struct dentry * dentry,
47723 + const struct vfsmount * mnt)
47724 +{
47725 + return 1;
47726 +}
47727 +
47728 +__u32
47729 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47730 +{
47731 + return 1;
47732 +}
47733 +
47734 +__u32
47735 +gr_acl_handle_access(const struct dentry * dentry,
47736 + const struct vfsmount * mnt, const int fmode)
47737 +{
47738 + return 1;
47739 +}
47740 +
47741 +__u32
47742 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47743 + mode_t mode)
47744 +{
47745 + return 1;
47746 +}
47747 +
47748 +__u32
47749 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47750 + mode_t mode)
47751 +{
47752 + return 1;
47753 +}
47754 +
47755 +__u32
47756 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47757 +{
47758 + return 1;
47759 +}
47760 +
47761 +__u32
47762 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47763 +{
47764 + return 1;
47765 +}
47766 +
47767 +void
47768 +grsecurity_init(void)
47769 +{
47770 + return;
47771 +}
47772 +
47773 +__u32
47774 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47775 + const struct dentry * parent_dentry,
47776 + const struct vfsmount * parent_mnt,
47777 + const int mode)
47778 +{
47779 + return 1;
47780 +}
47781 +
47782 +__u32
47783 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47784 + const struct dentry * parent_dentry,
47785 + const struct vfsmount * parent_mnt)
47786 +{
47787 + return 1;
47788 +}
47789 +
47790 +__u32
47791 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47792 + const struct dentry * parent_dentry,
47793 + const struct vfsmount * parent_mnt, const char *from)
47794 +{
47795 + return 1;
47796 +}
47797 +
47798 +__u32
47799 +gr_acl_handle_link(const struct dentry * new_dentry,
47800 + const struct dentry * parent_dentry,
47801 + const struct vfsmount * parent_mnt,
47802 + const struct dentry * old_dentry,
47803 + const struct vfsmount * old_mnt, const char *to)
47804 +{
47805 + return 1;
47806 +}
47807 +
47808 +int
47809 +gr_acl_handle_rename(const struct dentry *new_dentry,
47810 + const struct dentry *parent_dentry,
47811 + const struct vfsmount *parent_mnt,
47812 + const struct dentry *old_dentry,
47813 + const struct inode *old_parent_inode,
47814 + const struct vfsmount *old_mnt, const char *newname)
47815 +{
47816 + return 0;
47817 +}
47818 +
47819 +int
47820 +gr_acl_handle_filldir(const struct file *file, const char *name,
47821 + const int namelen, const ino_t ino)
47822 +{
47823 + return 1;
47824 +}
47825 +
47826 +int
47827 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47828 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47829 +{
47830 + return 1;
47831 +}
47832 +
47833 +int
47834 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47835 +{
47836 + return 0;
47837 +}
47838 +
47839 +int
47840 +gr_search_accept(const struct socket *sock)
47841 +{
47842 + return 0;
47843 +}
47844 +
47845 +int
47846 +gr_search_listen(const struct socket *sock)
47847 +{
47848 + return 0;
47849 +}
47850 +
47851 +int
47852 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47853 +{
47854 + return 0;
47855 +}
47856 +
47857 +__u32
47858 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47859 +{
47860 + return 1;
47861 +}
47862 +
47863 +__u32
47864 +gr_acl_handle_creat(const struct dentry * dentry,
47865 + const struct dentry * p_dentry,
47866 + const struct vfsmount * p_mnt, const int fmode,
47867 + const int imode)
47868 +{
47869 + return 1;
47870 +}
47871 +
47872 +void
47873 +gr_acl_handle_exit(void)
47874 +{
47875 + return;
47876 +}
47877 +
47878 +int
47879 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47880 +{
47881 + return 1;
47882 +}
47883 +
47884 +void
47885 +gr_set_role_label(const uid_t uid, const gid_t gid)
47886 +{
47887 + return;
47888 +}
47889 +
47890 +int
47891 +gr_acl_handle_procpidmem(const struct task_struct *task)
47892 +{
47893 + return 0;
47894 +}
47895 +
47896 +int
47897 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47898 +{
47899 + return 0;
47900 +}
47901 +
47902 +int
47903 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47904 +{
47905 + return 0;
47906 +}
47907 +
47908 +void
47909 +gr_set_kernel_label(struct task_struct *task)
47910 +{
47911 + return;
47912 +}
47913 +
47914 +int
47915 +gr_check_user_change(int real, int effective, int fs)
47916 +{
47917 + return 0;
47918 +}
47919 +
47920 +int
47921 +gr_check_group_change(int real, int effective, int fs)
47922 +{
47923 + return 0;
47924 +}
47925 +
47926 +int gr_acl_enable_at_secure(void)
47927 +{
47928 + return 0;
47929 +}
47930 +
47931 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47932 +{
47933 + return dentry->d_inode->i_sb->s_dev;
47934 +}
47935 +
47936 +EXPORT_SYMBOL(gr_is_capable);
47937 +EXPORT_SYMBOL(gr_is_capable_nolog);
47938 +EXPORT_SYMBOL(gr_learn_resource);
47939 +EXPORT_SYMBOL(gr_set_kernel_label);
47940 +#ifdef CONFIG_SECURITY
47941 +EXPORT_SYMBOL(gr_check_user_change);
47942 +EXPORT_SYMBOL(gr_check_group_change);
47943 +#endif
47944 diff -urNp linux-2.6.39.4/grsecurity/grsec_exec.c linux-2.6.39.4/grsecurity/grsec_exec.c
47945 --- linux-2.6.39.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
47946 +++ linux-2.6.39.4/grsecurity/grsec_exec.c 2011-08-05 19:44:37.000000000 -0400
47947 @@ -0,0 +1,146 @@
47948 +#include <linux/kernel.h>
47949 +#include <linux/sched.h>
47950 +#include <linux/file.h>
47951 +#include <linux/binfmts.h>
47952 +#include <linux/fs.h>
47953 +#include <linux/types.h>
47954 +#include <linux/grdefs.h>
47955 +#include <linux/grinternal.h>
47956 +#include <linux/capability.h>
47957 +#include <linux/compat.h>
47958 +
47959 +#include <asm/uaccess.h>
47960 +
47961 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47962 +static char gr_exec_arg_buf[132];
47963 +static DEFINE_MUTEX(gr_exec_arg_mutex);
47964 +#endif
47965 +
47966 +int
47967 +gr_handle_nproc(void)
47968 +{
47969 +#ifdef CONFIG_GRKERNSEC_EXECVE
47970 + const struct cred *cred = current_cred();
47971 + if (grsec_enable_execve && cred->user &&
47972 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
47973 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
47974 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
47975 + return -EAGAIN;
47976 + }
47977 +#endif
47978 + return 0;
47979 +}
47980 +
47981 +void
47982 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
47983 +{
47984 +#ifdef CONFIG_GRKERNSEC_EXECLOG
47985 + char *grarg = gr_exec_arg_buf;
47986 + unsigned int i, x, execlen = 0;
47987 + char c;
47988 +
47989 + if (!((grsec_enable_execlog && grsec_enable_group &&
47990 + in_group_p(grsec_audit_gid))
47991 + || (grsec_enable_execlog && !grsec_enable_group)))
47992 + return;
47993 +
47994 + mutex_lock(&gr_exec_arg_mutex);
47995 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
47996 +
47997 + if (unlikely(argv == NULL))
47998 + goto log;
47999 +
48000 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48001 + const char __user *p;
48002 + unsigned int len;
48003 +
48004 + if (copy_from_user(&p, argv + i, sizeof(p)))
48005 + goto log;
48006 + if (!p)
48007 + goto log;
48008 + len = strnlen_user(p, 128 - execlen);
48009 + if (len > 128 - execlen)
48010 + len = 128 - execlen;
48011 + else if (len > 0)
48012 + len--;
48013 + if (copy_from_user(grarg + execlen, p, len))
48014 + goto log;
48015 +
48016 + /* rewrite unprintable characters */
48017 + for (x = 0; x < len; x++) {
48018 + c = *(grarg + execlen + x);
48019 + if (c < 32 || c > 126)
48020 + *(grarg + execlen + x) = ' ';
48021 + }
48022 +
48023 + execlen += len;
48024 + *(grarg + execlen) = ' ';
48025 + *(grarg + execlen + 1) = '\0';
48026 + execlen++;
48027 + }
48028 +
48029 + log:
48030 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48031 + bprm->file->f_path.mnt, grarg);
48032 + mutex_unlock(&gr_exec_arg_mutex);
48033 +#endif
48034 + return;
48035 +}
48036 +
48037 +#ifdef CONFIG_COMPAT
48038 +void
48039 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
48040 +{
48041 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48042 + char *grarg = gr_exec_arg_buf;
48043 + unsigned int i, x, execlen = 0;
48044 + char c;
48045 +
48046 + if (!((grsec_enable_execlog && grsec_enable_group &&
48047 + in_group_p(grsec_audit_gid))
48048 + || (grsec_enable_execlog && !grsec_enable_group)))
48049 + return;
48050 +
48051 + mutex_lock(&gr_exec_arg_mutex);
48052 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48053 +
48054 + if (unlikely(argv == NULL))
48055 + goto log;
48056 +
48057 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48058 + compat_uptr_t p;
48059 + unsigned int len;
48060 +
48061 + if (get_user(p, argv + i))
48062 + goto log;
48063 + len = strnlen_user(compat_ptr(p), 128 - execlen);
48064 + if (len > 128 - execlen)
48065 + len = 128 - execlen;
48066 + else if (len > 0)
48067 + len--;
48068 + else
48069 + goto log;
48070 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
48071 + goto log;
48072 +
48073 + /* rewrite unprintable characters */
48074 + for (x = 0; x < len; x++) {
48075 + c = *(grarg + execlen + x);
48076 + if (c < 32 || c > 126)
48077 + *(grarg + execlen + x) = ' ';
48078 + }
48079 +
48080 + execlen += len;
48081 + *(grarg + execlen) = ' ';
48082 + *(grarg + execlen + 1) = '\0';
48083 + execlen++;
48084 + }
48085 +
48086 + log:
48087 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48088 + bprm->file->f_path.mnt, grarg);
48089 + mutex_unlock(&gr_exec_arg_mutex);
48090 +#endif
48091 + return;
48092 +}
48093 +#endif
48094 diff -urNp linux-2.6.39.4/grsecurity/grsec_fifo.c linux-2.6.39.4/grsecurity/grsec_fifo.c
48095 --- linux-2.6.39.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
48096 +++ linux-2.6.39.4/grsecurity/grsec_fifo.c 2011-08-05 19:44:37.000000000 -0400
48097 @@ -0,0 +1,24 @@
48098 +#include <linux/kernel.h>
48099 +#include <linux/sched.h>
48100 +#include <linux/fs.h>
48101 +#include <linux/file.h>
48102 +#include <linux/grinternal.h>
48103 +
48104 +int
48105 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48106 + const struct dentry *dir, const int flag, const int acc_mode)
48107 +{
48108 +#ifdef CONFIG_GRKERNSEC_FIFO
48109 + const struct cred *cred = current_cred();
48110 +
48111 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48112 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48113 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48114 + (cred->fsuid != dentry->d_inode->i_uid)) {
48115 + if (!inode_permission(dentry->d_inode, acc_mode))
48116 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48117 + return -EACCES;
48118 + }
48119 +#endif
48120 + return 0;
48121 +}
48122 diff -urNp linux-2.6.39.4/grsecurity/grsec_fork.c linux-2.6.39.4/grsecurity/grsec_fork.c
48123 --- linux-2.6.39.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48124 +++ linux-2.6.39.4/grsecurity/grsec_fork.c 2011-08-05 19:44:37.000000000 -0400
48125 @@ -0,0 +1,23 @@
48126 +#include <linux/kernel.h>
48127 +#include <linux/sched.h>
48128 +#include <linux/grsecurity.h>
48129 +#include <linux/grinternal.h>
48130 +#include <linux/errno.h>
48131 +
48132 +void
48133 +gr_log_forkfail(const int retval)
48134 +{
48135 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48136 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48137 + switch (retval) {
48138 + case -EAGAIN:
48139 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48140 + break;
48141 + case -ENOMEM:
48142 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48143 + break;
48144 + }
48145 + }
48146 +#endif
48147 + return;
48148 +}
48149 diff -urNp linux-2.6.39.4/grsecurity/grsec_init.c linux-2.6.39.4/grsecurity/grsec_init.c
48150 --- linux-2.6.39.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48151 +++ linux-2.6.39.4/grsecurity/grsec_init.c 2011-08-05 19:44:37.000000000 -0400
48152 @@ -0,0 +1,273 @@
48153 +#include <linux/kernel.h>
48154 +#include <linux/sched.h>
48155 +#include <linux/mm.h>
48156 +#include <linux/gracl.h>
48157 +#include <linux/slab.h>
48158 +#include <linux/vmalloc.h>
48159 +#include <linux/percpu.h>
48160 +#include <linux/module.h>
48161 +
48162 +int grsec_enable_brute;
48163 +int grsec_enable_link;
48164 +int grsec_enable_dmesg;
48165 +int grsec_enable_harden_ptrace;
48166 +int grsec_enable_fifo;
48167 +int grsec_enable_execve;
48168 +int grsec_enable_execlog;
48169 +int grsec_enable_signal;
48170 +int grsec_enable_forkfail;
48171 +int grsec_enable_audit_ptrace;
48172 +int grsec_enable_time;
48173 +int grsec_enable_audit_textrel;
48174 +int grsec_enable_group;
48175 +int grsec_audit_gid;
48176 +int grsec_enable_chdir;
48177 +int grsec_enable_mount;
48178 +int grsec_enable_rofs;
48179 +int grsec_enable_chroot_findtask;
48180 +int grsec_enable_chroot_mount;
48181 +int grsec_enable_chroot_shmat;
48182 +int grsec_enable_chroot_fchdir;
48183 +int grsec_enable_chroot_double;
48184 +int grsec_enable_chroot_pivot;
48185 +int grsec_enable_chroot_chdir;
48186 +int grsec_enable_chroot_chmod;
48187 +int grsec_enable_chroot_mknod;
48188 +int grsec_enable_chroot_nice;
48189 +int grsec_enable_chroot_execlog;
48190 +int grsec_enable_chroot_caps;
48191 +int grsec_enable_chroot_sysctl;
48192 +int grsec_enable_chroot_unix;
48193 +int grsec_enable_tpe;
48194 +int grsec_tpe_gid;
48195 +int grsec_enable_blackhole;
48196 +#ifdef CONFIG_IPV6_MODULE
48197 +EXPORT_SYMBOL(grsec_enable_blackhole);
48198 +#endif
48199 +int grsec_lastack_retries;
48200 +int grsec_enable_tpe_all;
48201 +int grsec_enable_tpe_invert;
48202 +int grsec_enable_socket_all;
48203 +int grsec_socket_all_gid;
48204 +int grsec_enable_socket_client;
48205 +int grsec_socket_client_gid;
48206 +int grsec_enable_socket_server;
48207 +int grsec_socket_server_gid;
48208 +int grsec_resource_logging;
48209 +int grsec_disable_privio;
48210 +int grsec_enable_log_rwxmaps;
48211 +int grsec_lock;
48212 +
48213 +DEFINE_SPINLOCK(grsec_alert_lock);
48214 +unsigned long grsec_alert_wtime = 0;
48215 +unsigned long grsec_alert_fyet = 0;
48216 +
48217 +DEFINE_SPINLOCK(grsec_audit_lock);
48218 +
48219 +DEFINE_RWLOCK(grsec_exec_file_lock);
48220 +
48221 +char *gr_shared_page[4];
48222 +
48223 +char *gr_alert_log_fmt;
48224 +char *gr_audit_log_fmt;
48225 +char *gr_alert_log_buf;
48226 +char *gr_audit_log_buf;
48227 +
48228 +extern struct gr_arg *gr_usermode;
48229 +extern unsigned char *gr_system_salt;
48230 +extern unsigned char *gr_system_sum;
48231 +
48232 +void __init
48233 +grsecurity_init(void)
48234 +{
48235 + int j;
48236 + /* create the per-cpu shared pages */
48237 +
48238 +#ifdef CONFIG_X86
48239 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48240 +#endif
48241 +
48242 + for (j = 0; j < 4; j++) {
48243 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48244 + if (gr_shared_page[j] == NULL) {
48245 + panic("Unable to allocate grsecurity shared page");
48246 + return;
48247 + }
48248 + }
48249 +
48250 + /* allocate log buffers */
48251 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48252 + if (!gr_alert_log_fmt) {
48253 + panic("Unable to allocate grsecurity alert log format buffer");
48254 + return;
48255 + }
48256 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48257 + if (!gr_audit_log_fmt) {
48258 + panic("Unable to allocate grsecurity audit log format buffer");
48259 + return;
48260 + }
48261 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48262 + if (!gr_alert_log_buf) {
48263 + panic("Unable to allocate grsecurity alert log buffer");
48264 + return;
48265 + }
48266 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48267 + if (!gr_audit_log_buf) {
48268 + panic("Unable to allocate grsecurity audit log buffer");
48269 + return;
48270 + }
48271 +
48272 + /* allocate memory for authentication structure */
48273 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48274 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48275 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48276 +
48277 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48278 + panic("Unable to allocate grsecurity authentication structure");
48279 + return;
48280 + }
48281 +
48282 +
48283 +#ifdef CONFIG_GRKERNSEC_IO
48284 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48285 + grsec_disable_privio = 1;
48286 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48287 + grsec_disable_privio = 1;
48288 +#else
48289 + grsec_disable_privio = 0;
48290 +#endif
48291 +#endif
48292 +
48293 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48294 + /* for backward compatibility, tpe_invert always defaults to on if
48295 + enabled in the kernel
48296 + */
48297 + grsec_enable_tpe_invert = 1;
48298 +#endif
48299 +
48300 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48301 +#ifndef CONFIG_GRKERNSEC_SYSCTL
48302 + grsec_lock = 1;
48303 +#endif
48304 +
48305 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48306 + grsec_enable_audit_textrel = 1;
48307 +#endif
48308 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48309 + grsec_enable_log_rwxmaps = 1;
48310 +#endif
48311 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48312 + grsec_enable_group = 1;
48313 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48314 +#endif
48315 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48316 + grsec_enable_chdir = 1;
48317 +#endif
48318 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48319 + grsec_enable_harden_ptrace = 1;
48320 +#endif
48321 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48322 + grsec_enable_mount = 1;
48323 +#endif
48324 +#ifdef CONFIG_GRKERNSEC_LINK
48325 + grsec_enable_link = 1;
48326 +#endif
48327 +#ifdef CONFIG_GRKERNSEC_BRUTE
48328 + grsec_enable_brute = 1;
48329 +#endif
48330 +#ifdef CONFIG_GRKERNSEC_DMESG
48331 + grsec_enable_dmesg = 1;
48332 +#endif
48333 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48334 + grsec_enable_blackhole = 1;
48335 + grsec_lastack_retries = 4;
48336 +#endif
48337 +#ifdef CONFIG_GRKERNSEC_FIFO
48338 + grsec_enable_fifo = 1;
48339 +#endif
48340 +#ifdef CONFIG_GRKERNSEC_EXECVE
48341 + grsec_enable_execve = 1;
48342 +#endif
48343 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48344 + grsec_enable_execlog = 1;
48345 +#endif
48346 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48347 + grsec_enable_signal = 1;
48348 +#endif
48349 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48350 + grsec_enable_forkfail = 1;
48351 +#endif
48352 +#ifdef CONFIG_GRKERNSEC_TIME
48353 + grsec_enable_time = 1;
48354 +#endif
48355 +#ifdef CONFIG_GRKERNSEC_RESLOG
48356 + grsec_resource_logging = 1;
48357 +#endif
48358 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48359 + grsec_enable_chroot_findtask = 1;
48360 +#endif
48361 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48362 + grsec_enable_chroot_unix = 1;
48363 +#endif
48364 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48365 + grsec_enable_chroot_mount = 1;
48366 +#endif
48367 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48368 + grsec_enable_chroot_fchdir = 1;
48369 +#endif
48370 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48371 + grsec_enable_chroot_shmat = 1;
48372 +#endif
48373 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48374 + grsec_enable_audit_ptrace = 1;
48375 +#endif
48376 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48377 + grsec_enable_chroot_double = 1;
48378 +#endif
48379 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48380 + grsec_enable_chroot_pivot = 1;
48381 +#endif
48382 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48383 + grsec_enable_chroot_chdir = 1;
48384 +#endif
48385 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48386 + grsec_enable_chroot_chmod = 1;
48387 +#endif
48388 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48389 + grsec_enable_chroot_mknod = 1;
48390 +#endif
48391 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48392 + grsec_enable_chroot_nice = 1;
48393 +#endif
48394 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48395 + grsec_enable_chroot_execlog = 1;
48396 +#endif
48397 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48398 + grsec_enable_chroot_caps = 1;
48399 +#endif
48400 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48401 + grsec_enable_chroot_sysctl = 1;
48402 +#endif
48403 +#ifdef CONFIG_GRKERNSEC_TPE
48404 + grsec_enable_tpe = 1;
48405 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48406 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48407 + grsec_enable_tpe_all = 1;
48408 +#endif
48409 +#endif
48410 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48411 + grsec_enable_socket_all = 1;
48412 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48413 +#endif
48414 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48415 + grsec_enable_socket_client = 1;
48416 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48417 +#endif
48418 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48419 + grsec_enable_socket_server = 1;
48420 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48421 +#endif
48422 +#endif
48423 +
48424 + return;
48425 +}
48426 diff -urNp linux-2.6.39.4/grsecurity/grsec_link.c linux-2.6.39.4/grsecurity/grsec_link.c
48427 --- linux-2.6.39.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48428 +++ linux-2.6.39.4/grsecurity/grsec_link.c 2011-08-05 19:44:37.000000000 -0400
48429 @@ -0,0 +1,43 @@
48430 +#include <linux/kernel.h>
48431 +#include <linux/sched.h>
48432 +#include <linux/fs.h>
48433 +#include <linux/file.h>
48434 +#include <linux/grinternal.h>
48435 +
48436 +int
48437 +gr_handle_follow_link(const struct inode *parent,
48438 + const struct inode *inode,
48439 + const struct dentry *dentry, const struct vfsmount *mnt)
48440 +{
48441 +#ifdef CONFIG_GRKERNSEC_LINK
48442 + const struct cred *cred = current_cred();
48443 +
48444 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48445 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48446 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48447 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48448 + return -EACCES;
48449 + }
48450 +#endif
48451 + return 0;
48452 +}
48453 +
48454 +int
48455 +gr_handle_hardlink(const struct dentry *dentry,
48456 + const struct vfsmount *mnt,
48457 + struct inode *inode, const int mode, const char *to)
48458 +{
48459 +#ifdef CONFIG_GRKERNSEC_LINK
48460 + const struct cred *cred = current_cred();
48461 +
48462 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48463 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48464 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48465 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48466 + !capable(CAP_FOWNER) && cred->uid) {
48467 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48468 + return -EPERM;
48469 + }
48470 +#endif
48471 + return 0;
48472 +}
48473 diff -urNp linux-2.6.39.4/grsecurity/grsec_log.c linux-2.6.39.4/grsecurity/grsec_log.c
48474 --- linux-2.6.39.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48475 +++ linux-2.6.39.4/grsecurity/grsec_log.c 2011-08-05 19:44:37.000000000 -0400
48476 @@ -0,0 +1,310 @@
48477 +#include <linux/kernel.h>
48478 +#include <linux/sched.h>
48479 +#include <linux/file.h>
48480 +#include <linux/tty.h>
48481 +#include <linux/fs.h>
48482 +#include <linux/grinternal.h>
48483 +
48484 +#ifdef CONFIG_TREE_PREEMPT_RCU
48485 +#define DISABLE_PREEMPT() preempt_disable()
48486 +#define ENABLE_PREEMPT() preempt_enable()
48487 +#else
48488 +#define DISABLE_PREEMPT()
48489 +#define ENABLE_PREEMPT()
48490 +#endif
48491 +
48492 +#define BEGIN_LOCKS(x) \
48493 + DISABLE_PREEMPT(); \
48494 + rcu_read_lock(); \
48495 + read_lock(&tasklist_lock); \
48496 + read_lock(&grsec_exec_file_lock); \
48497 + if (x != GR_DO_AUDIT) \
48498 + spin_lock(&grsec_alert_lock); \
48499 + else \
48500 + spin_lock(&grsec_audit_lock)
48501 +
48502 +#define END_LOCKS(x) \
48503 + if (x != GR_DO_AUDIT) \
48504 + spin_unlock(&grsec_alert_lock); \
48505 + else \
48506 + spin_unlock(&grsec_audit_lock); \
48507 + read_unlock(&grsec_exec_file_lock); \
48508 + read_unlock(&tasklist_lock); \
48509 + rcu_read_unlock(); \
48510 + ENABLE_PREEMPT(); \
48511 + if (x == GR_DONT_AUDIT) \
48512 + gr_handle_alertkill(current)
48513 +
48514 +enum {
48515 + FLOODING,
48516 + NO_FLOODING
48517 +};
48518 +
48519 +extern char *gr_alert_log_fmt;
48520 +extern char *gr_audit_log_fmt;
48521 +extern char *gr_alert_log_buf;
48522 +extern char *gr_audit_log_buf;
48523 +
48524 +static int gr_log_start(int audit)
48525 +{
48526 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48527 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48528 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48529 +
48530 + if (audit == GR_DO_AUDIT)
48531 + goto set_fmt;
48532 +
48533 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48534 + grsec_alert_wtime = jiffies;
48535 + grsec_alert_fyet = 0;
48536 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48537 + grsec_alert_fyet++;
48538 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48539 + grsec_alert_wtime = jiffies;
48540 + grsec_alert_fyet++;
48541 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48542 + return FLOODING;
48543 + } else return FLOODING;
48544 +
48545 +set_fmt:
48546 + memset(buf, 0, PAGE_SIZE);
48547 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48548 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48549 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48550 + } else if (current->signal->curr_ip) {
48551 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48552 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48553 + } else if (gr_acl_is_enabled()) {
48554 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48555 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48556 + } else {
48557 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48558 + strcpy(buf, fmt);
48559 + }
48560 +
48561 + return NO_FLOODING;
48562 +}
48563 +
48564 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48565 + __attribute__ ((format (printf, 2, 0)));
48566 +
48567 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48568 +{
48569 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48570 + unsigned int len = strlen(buf);
48571 +
48572 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48573 +
48574 + return;
48575 +}
48576 +
48577 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48578 + __attribute__ ((format (printf, 2, 3)));
48579 +
48580 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48581 +{
48582 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48583 + unsigned int len = strlen(buf);
48584 + va_list ap;
48585 +
48586 + va_start(ap, msg);
48587 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48588 + va_end(ap);
48589 +
48590 + return;
48591 +}
48592 +
48593 +static void gr_log_end(int audit)
48594 +{
48595 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48596 + unsigned int len = strlen(buf);
48597 +
48598 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48599 + printk("%s\n", buf);
48600 +
48601 + return;
48602 +}
48603 +
48604 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48605 +{
48606 + int logtype;
48607 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48608 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48609 + void *voidptr = NULL;
48610 + int num1 = 0, num2 = 0;
48611 + unsigned long ulong1 = 0, ulong2 = 0;
48612 + struct dentry *dentry = NULL;
48613 + struct vfsmount *mnt = NULL;
48614 + struct file *file = NULL;
48615 + struct task_struct *task = NULL;
48616 + const struct cred *cred, *pcred;
48617 + va_list ap;
48618 +
48619 + BEGIN_LOCKS(audit);
48620 + logtype = gr_log_start(audit);
48621 + if (logtype == FLOODING) {
48622 + END_LOCKS(audit);
48623 + return;
48624 + }
48625 + va_start(ap, argtypes);
48626 + switch (argtypes) {
48627 + case GR_TTYSNIFF:
48628 + task = va_arg(ap, struct task_struct *);
48629 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48630 + break;
48631 + case GR_SYSCTL_HIDDEN:
48632 + str1 = va_arg(ap, char *);
48633 + gr_log_middle_varargs(audit, msg, result, str1);
48634 + break;
48635 + case GR_RBAC:
48636 + dentry = va_arg(ap, struct dentry *);
48637 + mnt = va_arg(ap, struct vfsmount *);
48638 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48639 + break;
48640 + case GR_RBAC_STR:
48641 + dentry = va_arg(ap, struct dentry *);
48642 + mnt = va_arg(ap, struct vfsmount *);
48643 + str1 = va_arg(ap, char *);
48644 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48645 + break;
48646 + case GR_STR_RBAC:
48647 + str1 = va_arg(ap, char *);
48648 + dentry = va_arg(ap, struct dentry *);
48649 + mnt = va_arg(ap, struct vfsmount *);
48650 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48651 + break;
48652 + case GR_RBAC_MODE2:
48653 + dentry = va_arg(ap, struct dentry *);
48654 + mnt = va_arg(ap, struct vfsmount *);
48655 + str1 = va_arg(ap, char *);
48656 + str2 = va_arg(ap, char *);
48657 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48658 + break;
48659 + case GR_RBAC_MODE3:
48660 + dentry = va_arg(ap, struct dentry *);
48661 + mnt = va_arg(ap, struct vfsmount *);
48662 + str1 = va_arg(ap, char *);
48663 + str2 = va_arg(ap, char *);
48664 + str3 = va_arg(ap, char *);
48665 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48666 + break;
48667 + case GR_FILENAME:
48668 + dentry = va_arg(ap, struct dentry *);
48669 + mnt = va_arg(ap, struct vfsmount *);
48670 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48671 + break;
48672 + case GR_STR_FILENAME:
48673 + str1 = va_arg(ap, char *);
48674 + dentry = va_arg(ap, struct dentry *);
48675 + mnt = va_arg(ap, struct vfsmount *);
48676 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48677 + break;
48678 + case GR_FILENAME_STR:
48679 + dentry = va_arg(ap, struct dentry *);
48680 + mnt = va_arg(ap, struct vfsmount *);
48681 + str1 = va_arg(ap, char *);
48682 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48683 + break;
48684 + case GR_FILENAME_TWO_INT:
48685 + dentry = va_arg(ap, struct dentry *);
48686 + mnt = va_arg(ap, struct vfsmount *);
48687 + num1 = va_arg(ap, int);
48688 + num2 = va_arg(ap, int);
48689 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48690 + break;
48691 + case GR_FILENAME_TWO_INT_STR:
48692 + dentry = va_arg(ap, struct dentry *);
48693 + mnt = va_arg(ap, struct vfsmount *);
48694 + num1 = va_arg(ap, int);
48695 + num2 = va_arg(ap, int);
48696 + str1 = va_arg(ap, char *);
48697 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48698 + break;
48699 + case GR_TEXTREL:
48700 + file = va_arg(ap, struct file *);
48701 + ulong1 = va_arg(ap, unsigned long);
48702 + ulong2 = va_arg(ap, unsigned long);
48703 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48704 + break;
48705 + case GR_PTRACE:
48706 + task = va_arg(ap, struct task_struct *);
48707 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48708 + break;
48709 + case GR_RESOURCE:
48710 + task = va_arg(ap, struct task_struct *);
48711 + cred = __task_cred(task);
48712 + pcred = __task_cred(task->real_parent);
48713 + ulong1 = va_arg(ap, unsigned long);
48714 + str1 = va_arg(ap, char *);
48715 + ulong2 = va_arg(ap, unsigned long);
48716 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48717 + break;
48718 + case GR_CAP:
48719 + task = va_arg(ap, struct task_struct *);
48720 + cred = __task_cred(task);
48721 + pcred = __task_cred(task->real_parent);
48722 + str1 = va_arg(ap, char *);
48723 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48724 + break;
48725 + case GR_SIG:
48726 + str1 = va_arg(ap, char *);
48727 + voidptr = va_arg(ap, void *);
48728 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48729 + break;
48730 + case GR_SIG2:
48731 + task = va_arg(ap, struct task_struct *);
48732 + cred = __task_cred(task);
48733 + pcred = __task_cred(task->real_parent);
48734 + num1 = va_arg(ap, int);
48735 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48736 + break;
48737 + case GR_CRASH1:
48738 + task = va_arg(ap, struct task_struct *);
48739 + cred = __task_cred(task);
48740 + pcred = __task_cred(task->real_parent);
48741 + ulong1 = va_arg(ap, unsigned long);
48742 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48743 + break;
48744 + case GR_CRASH2:
48745 + task = va_arg(ap, struct task_struct *);
48746 + cred = __task_cred(task);
48747 + pcred = __task_cred(task->real_parent);
48748 + ulong1 = va_arg(ap, unsigned long);
48749 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48750 + break;
48751 + case GR_RWXMAP:
48752 + file = va_arg(ap, struct file *);
48753 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48754 + break;
48755 + case GR_PSACCT:
48756 + {
48757 + unsigned int wday, cday;
48758 + __u8 whr, chr;
48759 + __u8 wmin, cmin;
48760 + __u8 wsec, csec;
48761 + char cur_tty[64] = { 0 };
48762 + char parent_tty[64] = { 0 };
48763 +
48764 + task = va_arg(ap, struct task_struct *);
48765 + wday = va_arg(ap, unsigned int);
48766 + cday = va_arg(ap, unsigned int);
48767 + whr = va_arg(ap, int);
48768 + chr = va_arg(ap, int);
48769 + wmin = va_arg(ap, int);
48770 + cmin = va_arg(ap, int);
48771 + wsec = va_arg(ap, int);
48772 + csec = va_arg(ap, int);
48773 + ulong1 = va_arg(ap, unsigned long);
48774 + cred = __task_cred(task);
48775 + pcred = __task_cred(task->real_parent);
48776 +
48777 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48778 + }
48779 + break;
48780 + default:
48781 + gr_log_middle(audit, msg, ap);
48782 + }
48783 + va_end(ap);
48784 + gr_log_end(audit);
48785 + END_LOCKS(audit);
48786 +}
48787 diff -urNp linux-2.6.39.4/grsecurity/grsec_mem.c linux-2.6.39.4/grsecurity/grsec_mem.c
48788 --- linux-2.6.39.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48789 +++ linux-2.6.39.4/grsecurity/grsec_mem.c 2011-08-05 19:44:37.000000000 -0400
48790 @@ -0,0 +1,33 @@
48791 +#include <linux/kernel.h>
48792 +#include <linux/sched.h>
48793 +#include <linux/mm.h>
48794 +#include <linux/mman.h>
48795 +#include <linux/grinternal.h>
48796 +
48797 +void
48798 +gr_handle_ioperm(void)
48799 +{
48800 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48801 + return;
48802 +}
48803 +
48804 +void
48805 +gr_handle_iopl(void)
48806 +{
48807 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48808 + return;
48809 +}
48810 +
48811 +void
48812 +gr_handle_mem_readwrite(u64 from, u64 to)
48813 +{
48814 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48815 + return;
48816 +}
48817 +
48818 +void
48819 +gr_handle_vm86(void)
48820 +{
48821 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48822 + return;
48823 +}
48824 diff -urNp linux-2.6.39.4/grsecurity/grsec_mount.c linux-2.6.39.4/grsecurity/grsec_mount.c
48825 --- linux-2.6.39.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48826 +++ linux-2.6.39.4/grsecurity/grsec_mount.c 2011-08-05 19:44:37.000000000 -0400
48827 @@ -0,0 +1,62 @@
48828 +#include <linux/kernel.h>
48829 +#include <linux/sched.h>
48830 +#include <linux/mount.h>
48831 +#include <linux/grsecurity.h>
48832 +#include <linux/grinternal.h>
48833 +
48834 +void
48835 +gr_log_remount(const char *devname, const int retval)
48836 +{
48837 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48838 + if (grsec_enable_mount && (retval >= 0))
48839 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48840 +#endif
48841 + return;
48842 +}
48843 +
48844 +void
48845 +gr_log_unmount(const char *devname, const int retval)
48846 +{
48847 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48848 + if (grsec_enable_mount && (retval >= 0))
48849 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48850 +#endif
48851 + return;
48852 +}
48853 +
48854 +void
48855 +gr_log_mount(const char *from, const char *to, const int retval)
48856 +{
48857 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48858 + if (grsec_enable_mount && (retval >= 0))
48859 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48860 +#endif
48861 + return;
48862 +}
48863 +
48864 +int
48865 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48866 +{
48867 +#ifdef CONFIG_GRKERNSEC_ROFS
48868 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48869 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48870 + return -EPERM;
48871 + } else
48872 + return 0;
48873 +#endif
48874 + return 0;
48875 +}
48876 +
48877 +int
48878 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48879 +{
48880 +#ifdef CONFIG_GRKERNSEC_ROFS
48881 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48882 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48883 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48884 + return -EPERM;
48885 + } else
48886 + return 0;
48887 +#endif
48888 + return 0;
48889 +}
48890 diff -urNp linux-2.6.39.4/grsecurity/grsec_pax.c linux-2.6.39.4/grsecurity/grsec_pax.c
48891 --- linux-2.6.39.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48892 +++ linux-2.6.39.4/grsecurity/grsec_pax.c 2011-08-05 19:44:37.000000000 -0400
48893 @@ -0,0 +1,36 @@
48894 +#include <linux/kernel.h>
48895 +#include <linux/sched.h>
48896 +#include <linux/mm.h>
48897 +#include <linux/file.h>
48898 +#include <linux/grinternal.h>
48899 +#include <linux/grsecurity.h>
48900 +
48901 +void
48902 +gr_log_textrel(struct vm_area_struct * vma)
48903 +{
48904 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48905 + if (grsec_enable_audit_textrel)
48906 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48907 +#endif
48908 + return;
48909 +}
48910 +
48911 +void
48912 +gr_log_rwxmmap(struct file *file)
48913 +{
48914 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48915 + if (grsec_enable_log_rwxmaps)
48916 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
48917 +#endif
48918 + return;
48919 +}
48920 +
48921 +void
48922 +gr_log_rwxmprotect(struct file *file)
48923 +{
48924 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48925 + if (grsec_enable_log_rwxmaps)
48926 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
48927 +#endif
48928 + return;
48929 +}
48930 diff -urNp linux-2.6.39.4/grsecurity/grsec_ptrace.c linux-2.6.39.4/grsecurity/grsec_ptrace.c
48931 --- linux-2.6.39.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
48932 +++ linux-2.6.39.4/grsecurity/grsec_ptrace.c 2011-08-05 19:44:37.000000000 -0400
48933 @@ -0,0 +1,14 @@
48934 +#include <linux/kernel.h>
48935 +#include <linux/sched.h>
48936 +#include <linux/grinternal.h>
48937 +#include <linux/grsecurity.h>
48938 +
48939 +void
48940 +gr_audit_ptrace(struct task_struct *task)
48941 +{
48942 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48943 + if (grsec_enable_audit_ptrace)
48944 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
48945 +#endif
48946 + return;
48947 +}
48948 diff -urNp linux-2.6.39.4/grsecurity/grsec_sig.c linux-2.6.39.4/grsecurity/grsec_sig.c
48949 --- linux-2.6.39.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
48950 +++ linux-2.6.39.4/grsecurity/grsec_sig.c 2011-08-05 19:44:37.000000000 -0400
48951 @@ -0,0 +1,206 @@
48952 +#include <linux/kernel.h>
48953 +#include <linux/sched.h>
48954 +#include <linux/delay.h>
48955 +#include <linux/grsecurity.h>
48956 +#include <linux/grinternal.h>
48957 +#include <linux/hardirq.h>
48958 +
48959 +char *signames[] = {
48960 + [SIGSEGV] = "Segmentation fault",
48961 + [SIGILL] = "Illegal instruction",
48962 + [SIGABRT] = "Abort",
48963 + [SIGBUS] = "Invalid alignment/Bus error"
48964 +};
48965 +
48966 +void
48967 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
48968 +{
48969 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48970 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
48971 + (sig == SIGABRT) || (sig == SIGBUS))) {
48972 + if (t->pid == current->pid) {
48973 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
48974 + } else {
48975 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
48976 + }
48977 + }
48978 +#endif
48979 + return;
48980 +}
48981 +
48982 +int
48983 +gr_handle_signal(const struct task_struct *p, const int sig)
48984 +{
48985 +#ifdef CONFIG_GRKERNSEC
48986 + if (current->pid > 1 && gr_check_protected_task(p)) {
48987 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
48988 + return -EPERM;
48989 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
48990 + return -EPERM;
48991 + }
48992 +#endif
48993 + return 0;
48994 +}
48995 +
48996 +#ifdef CONFIG_GRKERNSEC
48997 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
48998 +
48999 +int gr_fake_force_sig(int sig, struct task_struct *t)
49000 +{
49001 + unsigned long int flags;
49002 + int ret, blocked, ignored;
49003 + struct k_sigaction *action;
49004 +
49005 + spin_lock_irqsave(&t->sighand->siglock, flags);
49006 + action = &t->sighand->action[sig-1];
49007 + ignored = action->sa.sa_handler == SIG_IGN;
49008 + blocked = sigismember(&t->blocked, sig);
49009 + if (blocked || ignored) {
49010 + action->sa.sa_handler = SIG_DFL;
49011 + if (blocked) {
49012 + sigdelset(&t->blocked, sig);
49013 + recalc_sigpending_and_wake(t);
49014 + }
49015 + }
49016 + if (action->sa.sa_handler == SIG_DFL)
49017 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
49018 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
49019 +
49020 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
49021 +
49022 + return ret;
49023 +}
49024 +#endif
49025 +
49026 +#ifdef CONFIG_GRKERNSEC_BRUTE
49027 +#define GR_USER_BAN_TIME (15 * 60)
49028 +
49029 +static int __get_dumpable(unsigned long mm_flags)
49030 +{
49031 + int ret;
49032 +
49033 + ret = mm_flags & MMF_DUMPABLE_MASK;
49034 + return (ret >= 2) ? 2 : ret;
49035 +}
49036 +#endif
49037 +
49038 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
49039 +{
49040 +#ifdef CONFIG_GRKERNSEC_BRUTE
49041 + uid_t uid = 0;
49042 +
49043 + if (!grsec_enable_brute)
49044 + return;
49045 +
49046 + rcu_read_lock();
49047 + read_lock(&tasklist_lock);
49048 + read_lock(&grsec_exec_file_lock);
49049 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
49050 + p->real_parent->brute = 1;
49051 + else {
49052 + const struct cred *cred = __task_cred(p), *cred2;
49053 + struct task_struct *tsk, *tsk2;
49054 +
49055 + if (!__get_dumpable(mm_flags) && cred->uid) {
49056 + struct user_struct *user;
49057 +
49058 + uid = cred->uid;
49059 +
49060 + /* this is put upon execution past expiration */
49061 + user = find_user(uid);
49062 + if (user == NULL)
49063 + goto unlock;
49064 + user->banned = 1;
49065 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
49066 + if (user->ban_expires == ~0UL)
49067 + user->ban_expires--;
49068 +
49069 + do_each_thread(tsk2, tsk) {
49070 + cred2 = __task_cred(tsk);
49071 + if (tsk != p && cred2->uid == uid)
49072 + gr_fake_force_sig(SIGKILL, tsk);
49073 + } while_each_thread(tsk2, tsk);
49074 + }
49075 + }
49076 +unlock:
49077 + read_unlock(&grsec_exec_file_lock);
49078 + read_unlock(&tasklist_lock);
49079 + rcu_read_unlock();
49080 +
49081 + if (uid)
49082 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
49083 +
49084 +#endif
49085 + return;
49086 +}
49087 +
49088 +void gr_handle_brute_check(void)
49089 +{
49090 +#ifdef CONFIG_GRKERNSEC_BRUTE
49091 + if (current->brute)
49092 + msleep(30 * 1000);
49093 +#endif
49094 + return;
49095 +}
49096 +
49097 +void gr_handle_kernel_exploit(void)
49098 +{
49099 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
49100 + const struct cred *cred;
49101 + struct task_struct *tsk, *tsk2;
49102 + struct user_struct *user;
49103 + uid_t uid;
49104 +
49105 + if (in_irq() || in_serving_softirq() || in_nmi())
49106 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49107 +
49108 + uid = current_uid();
49109 +
49110 + if (uid == 0)
49111 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
49112 + else {
49113 + /* kill all the processes of this user, hold a reference
49114 + to their creds struct, and prevent them from creating
49115 + another process until system reset
49116 + */
49117 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49118 + /* we intentionally leak this ref */
49119 + user = get_uid(current->cred->user);
49120 + if (user) {
49121 + user->banned = 1;
49122 + user->ban_expires = ~0UL;
49123 + }
49124 +
49125 + read_lock(&tasklist_lock);
49126 + do_each_thread(tsk2, tsk) {
49127 + cred = __task_cred(tsk);
49128 + if (cred->uid == uid)
49129 + gr_fake_force_sig(SIGKILL, tsk);
49130 + } while_each_thread(tsk2, tsk);
49131 + read_unlock(&tasklist_lock);
49132 + }
49133 +#endif
49134 +}
49135 +
49136 +int __gr_process_user_ban(struct user_struct *user)
49137 +{
49138 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49139 + if (unlikely(user->banned)) {
49140 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49141 + user->banned = 0;
49142 + user->ban_expires = 0;
49143 + free_uid(user);
49144 + } else
49145 + return -EPERM;
49146 + }
49147 +#endif
49148 + return 0;
49149 +}
49150 +
49151 +int gr_process_user_ban(void)
49152 +{
49153 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49154 + return __gr_process_user_ban(current->cred->user);
49155 +#endif
49156 + return 0;
49157 +}
49158 diff -urNp linux-2.6.39.4/grsecurity/grsec_sock.c linux-2.6.39.4/grsecurity/grsec_sock.c
49159 --- linux-2.6.39.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49160 +++ linux-2.6.39.4/grsecurity/grsec_sock.c 2011-08-05 19:44:37.000000000 -0400
49161 @@ -0,0 +1,244 @@
49162 +#include <linux/kernel.h>
49163 +#include <linux/module.h>
49164 +#include <linux/sched.h>
49165 +#include <linux/file.h>
49166 +#include <linux/net.h>
49167 +#include <linux/in.h>
49168 +#include <linux/ip.h>
49169 +#include <net/sock.h>
49170 +#include <net/inet_sock.h>
49171 +#include <linux/grsecurity.h>
49172 +#include <linux/grinternal.h>
49173 +#include <linux/gracl.h>
49174 +
49175 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49176 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49177 +
49178 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
49179 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
49180 +
49181 +#ifdef CONFIG_UNIX_MODULE
49182 +EXPORT_SYMBOL(gr_acl_handle_unix);
49183 +EXPORT_SYMBOL(gr_acl_handle_mknod);
49184 +EXPORT_SYMBOL(gr_handle_chroot_unix);
49185 +EXPORT_SYMBOL(gr_handle_create);
49186 +#endif
49187 +
49188 +#ifdef CONFIG_GRKERNSEC
49189 +#define gr_conn_table_size 32749
49190 +struct conn_table_entry {
49191 + struct conn_table_entry *next;
49192 + struct signal_struct *sig;
49193 +};
49194 +
49195 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49196 +DEFINE_SPINLOCK(gr_conn_table_lock);
49197 +
49198 +extern const char * gr_socktype_to_name(unsigned char type);
49199 +extern const char * gr_proto_to_name(unsigned char proto);
49200 +extern const char * gr_sockfamily_to_name(unsigned char family);
49201 +
49202 +static __inline__ int
49203 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49204 +{
49205 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49206 +}
49207 +
49208 +static __inline__ int
49209 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49210 + __u16 sport, __u16 dport)
49211 +{
49212 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49213 + sig->gr_sport == sport && sig->gr_dport == dport))
49214 + return 1;
49215 + else
49216 + return 0;
49217 +}
49218 +
49219 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49220 +{
49221 + struct conn_table_entry **match;
49222 + unsigned int index;
49223 +
49224 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49225 + sig->gr_sport, sig->gr_dport,
49226 + gr_conn_table_size);
49227 +
49228 + newent->sig = sig;
49229 +
49230 + match = &gr_conn_table[index];
49231 + newent->next = *match;
49232 + *match = newent;
49233 +
49234 + return;
49235 +}
49236 +
49237 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49238 +{
49239 + struct conn_table_entry *match, *last = NULL;
49240 + unsigned int index;
49241 +
49242 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49243 + sig->gr_sport, sig->gr_dport,
49244 + gr_conn_table_size);
49245 +
49246 + match = gr_conn_table[index];
49247 + while (match && !conn_match(match->sig,
49248 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49249 + sig->gr_dport)) {
49250 + last = match;
49251 + match = match->next;
49252 + }
49253 +
49254 + if (match) {
49255 + if (last)
49256 + last->next = match->next;
49257 + else
49258 + gr_conn_table[index] = NULL;
49259 + kfree(match);
49260 + }
49261 +
49262 + return;
49263 +}
49264 +
49265 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49266 + __u16 sport, __u16 dport)
49267 +{
49268 + struct conn_table_entry *match;
49269 + unsigned int index;
49270 +
49271 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49272 +
49273 + match = gr_conn_table[index];
49274 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49275 + match = match->next;
49276 +
49277 + if (match)
49278 + return match->sig;
49279 + else
49280 + return NULL;
49281 +}
49282 +
49283 +#endif
49284 +
49285 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49286 +{
49287 +#ifdef CONFIG_GRKERNSEC
49288 + struct signal_struct *sig = task->signal;
49289 + struct conn_table_entry *newent;
49290 +
49291 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49292 + if (newent == NULL)
49293 + return;
49294 + /* no bh lock needed since we are called with bh disabled */
49295 + spin_lock(&gr_conn_table_lock);
49296 + gr_del_task_from_ip_table_nolock(sig);
49297 + sig->gr_saddr = inet->inet_rcv_saddr;
49298 + sig->gr_daddr = inet->inet_daddr;
49299 + sig->gr_sport = inet->inet_sport;
49300 + sig->gr_dport = inet->inet_dport;
49301 + gr_add_to_task_ip_table_nolock(sig, newent);
49302 + spin_unlock(&gr_conn_table_lock);
49303 +#endif
49304 + return;
49305 +}
49306 +
49307 +void gr_del_task_from_ip_table(struct task_struct *task)
49308 +{
49309 +#ifdef CONFIG_GRKERNSEC
49310 + spin_lock_bh(&gr_conn_table_lock);
49311 + gr_del_task_from_ip_table_nolock(task->signal);
49312 + spin_unlock_bh(&gr_conn_table_lock);
49313 +#endif
49314 + return;
49315 +}
49316 +
49317 +void
49318 +gr_attach_curr_ip(const struct sock *sk)
49319 +{
49320 +#ifdef CONFIG_GRKERNSEC
49321 + struct signal_struct *p, *set;
49322 + const struct inet_sock *inet = inet_sk(sk);
49323 +
49324 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49325 + return;
49326 +
49327 + set = current->signal;
49328 +
49329 + spin_lock_bh(&gr_conn_table_lock);
49330 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49331 + inet->inet_dport, inet->inet_sport);
49332 + if (unlikely(p != NULL)) {
49333 + set->curr_ip = p->curr_ip;
49334 + set->used_accept = 1;
49335 + gr_del_task_from_ip_table_nolock(p);
49336 + spin_unlock_bh(&gr_conn_table_lock);
49337 + return;
49338 + }
49339 + spin_unlock_bh(&gr_conn_table_lock);
49340 +
49341 + set->curr_ip = inet->inet_daddr;
49342 + set->used_accept = 1;
49343 +#endif
49344 + return;
49345 +}
49346 +
49347 +int
49348 +gr_handle_sock_all(const int family, const int type, const int protocol)
49349 +{
49350 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49351 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49352 + (family != AF_UNIX)) {
49353 + if (family == AF_INET)
49354 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49355 + else
49356 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49357 + return -EACCES;
49358 + }
49359 +#endif
49360 + return 0;
49361 +}
49362 +
49363 +int
49364 +gr_handle_sock_server(const struct sockaddr *sck)
49365 +{
49366 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49367 + if (grsec_enable_socket_server &&
49368 + in_group_p(grsec_socket_server_gid) &&
49369 + sck && (sck->sa_family != AF_UNIX) &&
49370 + (sck->sa_family != AF_LOCAL)) {
49371 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49372 + return -EACCES;
49373 + }
49374 +#endif
49375 + return 0;
49376 +}
49377 +
49378 +int
49379 +gr_handle_sock_server_other(const struct sock *sck)
49380 +{
49381 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49382 + if (grsec_enable_socket_server &&
49383 + in_group_p(grsec_socket_server_gid) &&
49384 + sck && (sck->sk_family != AF_UNIX) &&
49385 + (sck->sk_family != AF_LOCAL)) {
49386 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49387 + return -EACCES;
49388 + }
49389 +#endif
49390 + return 0;
49391 +}
49392 +
49393 +int
49394 +gr_handle_sock_client(const struct sockaddr *sck)
49395 +{
49396 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49397 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49398 + sck && (sck->sa_family != AF_UNIX) &&
49399 + (sck->sa_family != AF_LOCAL)) {
49400 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49401 + return -EACCES;
49402 + }
49403 +#endif
49404 + return 0;
49405 +}
49406 diff -urNp linux-2.6.39.4/grsecurity/grsec_sysctl.c linux-2.6.39.4/grsecurity/grsec_sysctl.c
49407 --- linux-2.6.39.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49408 +++ linux-2.6.39.4/grsecurity/grsec_sysctl.c 2011-08-05 19:44:37.000000000 -0400
49409 @@ -0,0 +1,442 @@
49410 +#include <linux/kernel.h>
49411 +#include <linux/sched.h>
49412 +#include <linux/sysctl.h>
49413 +#include <linux/grsecurity.h>
49414 +#include <linux/grinternal.h>
49415 +
49416 +int
49417 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49418 +{
49419 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49420 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49421 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49422 + return -EACCES;
49423 + }
49424 +#endif
49425 + return 0;
49426 +}
49427 +
49428 +#ifdef CONFIG_GRKERNSEC_ROFS
49429 +static int __maybe_unused one = 1;
49430 +#endif
49431 +
49432 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49433 +struct ctl_table grsecurity_table[] = {
49434 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49435 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49436 +#ifdef CONFIG_GRKERNSEC_IO
49437 + {
49438 + .procname = "disable_priv_io",
49439 + .data = &grsec_disable_privio,
49440 + .maxlen = sizeof(int),
49441 + .mode = 0600,
49442 + .proc_handler = &proc_dointvec,
49443 + },
49444 +#endif
49445 +#endif
49446 +#ifdef CONFIG_GRKERNSEC_LINK
49447 + {
49448 + .procname = "linking_restrictions",
49449 + .data = &grsec_enable_link,
49450 + .maxlen = sizeof(int),
49451 + .mode = 0600,
49452 + .proc_handler = &proc_dointvec,
49453 + },
49454 +#endif
49455 +#ifdef CONFIG_GRKERNSEC_BRUTE
49456 + {
49457 + .procname = "deter_bruteforce",
49458 + .data = &grsec_enable_brute,
49459 + .maxlen = sizeof(int),
49460 + .mode = 0600,
49461 + .proc_handler = &proc_dointvec,
49462 + },
49463 +#endif
49464 +#ifdef CONFIG_GRKERNSEC_FIFO
49465 + {
49466 + .procname = "fifo_restrictions",
49467 + .data = &grsec_enable_fifo,
49468 + .maxlen = sizeof(int),
49469 + .mode = 0600,
49470 + .proc_handler = &proc_dointvec,
49471 + },
49472 +#endif
49473 +#ifdef CONFIG_GRKERNSEC_EXECVE
49474 + {
49475 + .procname = "execve_limiting",
49476 + .data = &grsec_enable_execve,
49477 + .maxlen = sizeof(int),
49478 + .mode = 0600,
49479 + .proc_handler = &proc_dointvec,
49480 + },
49481 +#endif
49482 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49483 + {
49484 + .procname = "ip_blackhole",
49485 + .data = &grsec_enable_blackhole,
49486 + .maxlen = sizeof(int),
49487 + .mode = 0600,
49488 + .proc_handler = &proc_dointvec,
49489 + },
49490 + {
49491 + .procname = "lastack_retries",
49492 + .data = &grsec_lastack_retries,
49493 + .maxlen = sizeof(int),
49494 + .mode = 0600,
49495 + .proc_handler = &proc_dointvec,
49496 + },
49497 +#endif
49498 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49499 + {
49500 + .procname = "exec_logging",
49501 + .data = &grsec_enable_execlog,
49502 + .maxlen = sizeof(int),
49503 + .mode = 0600,
49504 + .proc_handler = &proc_dointvec,
49505 + },
49506 +#endif
49507 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49508 + {
49509 + .procname = "rwxmap_logging",
49510 + .data = &grsec_enable_log_rwxmaps,
49511 + .maxlen = sizeof(int),
49512 + .mode = 0600,
49513 + .proc_handler = &proc_dointvec,
49514 + },
49515 +#endif
49516 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49517 + {
49518 + .procname = "signal_logging",
49519 + .data = &grsec_enable_signal,
49520 + .maxlen = sizeof(int),
49521 + .mode = 0600,
49522 + .proc_handler = &proc_dointvec,
49523 + },
49524 +#endif
49525 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49526 + {
49527 + .procname = "forkfail_logging",
49528 + .data = &grsec_enable_forkfail,
49529 + .maxlen = sizeof(int),
49530 + .mode = 0600,
49531 + .proc_handler = &proc_dointvec,
49532 + },
49533 +#endif
49534 +#ifdef CONFIG_GRKERNSEC_TIME
49535 + {
49536 + .procname = "timechange_logging",
49537 + .data = &grsec_enable_time,
49538 + .maxlen = sizeof(int),
49539 + .mode = 0600,
49540 + .proc_handler = &proc_dointvec,
49541 + },
49542 +#endif
49543 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49544 + {
49545 + .procname = "chroot_deny_shmat",
49546 + .data = &grsec_enable_chroot_shmat,
49547 + .maxlen = sizeof(int),
49548 + .mode = 0600,
49549 + .proc_handler = &proc_dointvec,
49550 + },
49551 +#endif
49552 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49553 + {
49554 + .procname = "chroot_deny_unix",
49555 + .data = &grsec_enable_chroot_unix,
49556 + .maxlen = sizeof(int),
49557 + .mode = 0600,
49558 + .proc_handler = &proc_dointvec,
49559 + },
49560 +#endif
49561 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49562 + {
49563 + .procname = "chroot_deny_mount",
49564 + .data = &grsec_enable_chroot_mount,
49565 + .maxlen = sizeof(int),
49566 + .mode = 0600,
49567 + .proc_handler = &proc_dointvec,
49568 + },
49569 +#endif
49570 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49571 + {
49572 + .procname = "chroot_deny_fchdir",
49573 + .data = &grsec_enable_chroot_fchdir,
49574 + .maxlen = sizeof(int),
49575 + .mode = 0600,
49576 + .proc_handler = &proc_dointvec,
49577 + },
49578 +#endif
49579 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49580 + {
49581 + .procname = "chroot_deny_chroot",
49582 + .data = &grsec_enable_chroot_double,
49583 + .maxlen = sizeof(int),
49584 + .mode = 0600,
49585 + .proc_handler = &proc_dointvec,
49586 + },
49587 +#endif
49588 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49589 + {
49590 + .procname = "chroot_deny_pivot",
49591 + .data = &grsec_enable_chroot_pivot,
49592 + .maxlen = sizeof(int),
49593 + .mode = 0600,
49594 + .proc_handler = &proc_dointvec,
49595 + },
49596 +#endif
49597 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49598 + {
49599 + .procname = "chroot_enforce_chdir",
49600 + .data = &grsec_enable_chroot_chdir,
49601 + .maxlen = sizeof(int),
49602 + .mode = 0600,
49603 + .proc_handler = &proc_dointvec,
49604 + },
49605 +#endif
49606 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49607 + {
49608 + .procname = "chroot_deny_chmod",
49609 + .data = &grsec_enable_chroot_chmod,
49610 + .maxlen = sizeof(int),
49611 + .mode = 0600,
49612 + .proc_handler = &proc_dointvec,
49613 + },
49614 +#endif
49615 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49616 + {
49617 + .procname = "chroot_deny_mknod",
49618 + .data = &grsec_enable_chroot_mknod,
49619 + .maxlen = sizeof(int),
49620 + .mode = 0600,
49621 + .proc_handler = &proc_dointvec,
49622 + },
49623 +#endif
49624 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49625 + {
49626 + .procname = "chroot_restrict_nice",
49627 + .data = &grsec_enable_chroot_nice,
49628 + .maxlen = sizeof(int),
49629 + .mode = 0600,
49630 + .proc_handler = &proc_dointvec,
49631 + },
49632 +#endif
49633 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49634 + {
49635 + .procname = "chroot_execlog",
49636 + .data = &grsec_enable_chroot_execlog,
49637 + .maxlen = sizeof(int),
49638 + .mode = 0600,
49639 + .proc_handler = &proc_dointvec,
49640 + },
49641 +#endif
49642 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49643 + {
49644 + .procname = "chroot_caps",
49645 + .data = &grsec_enable_chroot_caps,
49646 + .maxlen = sizeof(int),
49647 + .mode = 0600,
49648 + .proc_handler = &proc_dointvec,
49649 + },
49650 +#endif
49651 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49652 + {
49653 + .procname = "chroot_deny_sysctl",
49654 + .data = &grsec_enable_chroot_sysctl,
49655 + .maxlen = sizeof(int),
49656 + .mode = 0600,
49657 + .proc_handler = &proc_dointvec,
49658 + },
49659 +#endif
49660 +#ifdef CONFIG_GRKERNSEC_TPE
49661 + {
49662 + .procname = "tpe",
49663 + .data = &grsec_enable_tpe,
49664 + .maxlen = sizeof(int),
49665 + .mode = 0600,
49666 + .proc_handler = &proc_dointvec,
49667 + },
49668 + {
49669 + .procname = "tpe_gid",
49670 + .data = &grsec_tpe_gid,
49671 + .maxlen = sizeof(int),
49672 + .mode = 0600,
49673 + .proc_handler = &proc_dointvec,
49674 + },
49675 +#endif
49676 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49677 + {
49678 + .procname = "tpe_invert",
49679 + .data = &grsec_enable_tpe_invert,
49680 + .maxlen = sizeof(int),
49681 + .mode = 0600,
49682 + .proc_handler = &proc_dointvec,
49683 + },
49684 +#endif
49685 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49686 + {
49687 + .procname = "tpe_restrict_all",
49688 + .data = &grsec_enable_tpe_all,
49689 + .maxlen = sizeof(int),
49690 + .mode = 0600,
49691 + .proc_handler = &proc_dointvec,
49692 + },
49693 +#endif
49694 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49695 + {
49696 + .procname = "socket_all",
49697 + .data = &grsec_enable_socket_all,
49698 + .maxlen = sizeof(int),
49699 + .mode = 0600,
49700 + .proc_handler = &proc_dointvec,
49701 + },
49702 + {
49703 + .procname = "socket_all_gid",
49704 + .data = &grsec_socket_all_gid,
49705 + .maxlen = sizeof(int),
49706 + .mode = 0600,
49707 + .proc_handler = &proc_dointvec,
49708 + },
49709 +#endif
49710 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49711 + {
49712 + .procname = "socket_client",
49713 + .data = &grsec_enable_socket_client,
49714 + .maxlen = sizeof(int),
49715 + .mode = 0600,
49716 + .proc_handler = &proc_dointvec,
49717 + },
49718 + {
49719 + .procname = "socket_client_gid",
49720 + .data = &grsec_socket_client_gid,
49721 + .maxlen = sizeof(int),
49722 + .mode = 0600,
49723 + .proc_handler = &proc_dointvec,
49724 + },
49725 +#endif
49726 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49727 + {
49728 + .procname = "socket_server",
49729 + .data = &grsec_enable_socket_server,
49730 + .maxlen = sizeof(int),
49731 + .mode = 0600,
49732 + .proc_handler = &proc_dointvec,
49733 + },
49734 + {
49735 + .procname = "socket_server_gid",
49736 + .data = &grsec_socket_server_gid,
49737 + .maxlen = sizeof(int),
49738 + .mode = 0600,
49739 + .proc_handler = &proc_dointvec,
49740 + },
49741 +#endif
49742 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49743 + {
49744 + .procname = "audit_group",
49745 + .data = &grsec_enable_group,
49746 + .maxlen = sizeof(int),
49747 + .mode = 0600,
49748 + .proc_handler = &proc_dointvec,
49749 + },
49750 + {
49751 + .procname = "audit_gid",
49752 + .data = &grsec_audit_gid,
49753 + .maxlen = sizeof(int),
49754 + .mode = 0600,
49755 + .proc_handler = &proc_dointvec,
49756 + },
49757 +#endif
49758 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49759 + {
49760 + .procname = "audit_chdir",
49761 + .data = &grsec_enable_chdir,
49762 + .maxlen = sizeof(int),
49763 + .mode = 0600,
49764 + .proc_handler = &proc_dointvec,
49765 + },
49766 +#endif
49767 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49768 + {
49769 + .procname = "audit_mount",
49770 + .data = &grsec_enable_mount,
49771 + .maxlen = sizeof(int),
49772 + .mode = 0600,
49773 + .proc_handler = &proc_dointvec,
49774 + },
49775 +#endif
49776 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49777 + {
49778 + .procname = "audit_textrel",
49779 + .data = &grsec_enable_audit_textrel,
49780 + .maxlen = sizeof(int),
49781 + .mode = 0600,
49782 + .proc_handler = &proc_dointvec,
49783 + },
49784 +#endif
49785 +#ifdef CONFIG_GRKERNSEC_DMESG
49786 + {
49787 + .procname = "dmesg",
49788 + .data = &grsec_enable_dmesg,
49789 + .maxlen = sizeof(int),
49790 + .mode = 0600,
49791 + .proc_handler = &proc_dointvec,
49792 + },
49793 +#endif
49794 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49795 + {
49796 + .procname = "chroot_findtask",
49797 + .data = &grsec_enable_chroot_findtask,
49798 + .maxlen = sizeof(int),
49799 + .mode = 0600,
49800 + .proc_handler = &proc_dointvec,
49801 + },
49802 +#endif
49803 +#ifdef CONFIG_GRKERNSEC_RESLOG
49804 + {
49805 + .procname = "resource_logging",
49806 + .data = &grsec_resource_logging,
49807 + .maxlen = sizeof(int),
49808 + .mode = 0600,
49809 + .proc_handler = &proc_dointvec,
49810 + },
49811 +#endif
49812 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49813 + {
49814 + .procname = "audit_ptrace",
49815 + .data = &grsec_enable_audit_ptrace,
49816 + .maxlen = sizeof(int),
49817 + .mode = 0600,
49818 + .proc_handler = &proc_dointvec,
49819 + },
49820 +#endif
49821 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49822 + {
49823 + .procname = "harden_ptrace",
49824 + .data = &grsec_enable_harden_ptrace,
49825 + .maxlen = sizeof(int),
49826 + .mode = 0600,
49827 + .proc_handler = &proc_dointvec,
49828 + },
49829 +#endif
49830 + {
49831 + .procname = "grsec_lock",
49832 + .data = &grsec_lock,
49833 + .maxlen = sizeof(int),
49834 + .mode = 0600,
49835 + .proc_handler = &proc_dointvec,
49836 + },
49837 +#endif
49838 +#ifdef CONFIG_GRKERNSEC_ROFS
49839 + {
49840 + .procname = "romount_protect",
49841 + .data = &grsec_enable_rofs,
49842 + .maxlen = sizeof(int),
49843 + .mode = 0600,
49844 + .proc_handler = &proc_dointvec_minmax,
49845 + .extra1 = &one,
49846 + .extra2 = &one,
49847 + },
49848 +#endif
49849 + { }
49850 +};
49851 +#endif
49852 diff -urNp linux-2.6.39.4/grsecurity/grsec_time.c linux-2.6.39.4/grsecurity/grsec_time.c
49853 --- linux-2.6.39.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49854 +++ linux-2.6.39.4/grsecurity/grsec_time.c 2011-08-05 19:44:37.000000000 -0400
49855 @@ -0,0 +1,16 @@
49856 +#include <linux/kernel.h>
49857 +#include <linux/sched.h>
49858 +#include <linux/grinternal.h>
49859 +#include <linux/module.h>
49860 +
49861 +void
49862 +gr_log_timechange(void)
49863 +{
49864 +#ifdef CONFIG_GRKERNSEC_TIME
49865 + if (grsec_enable_time)
49866 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49867 +#endif
49868 + return;
49869 +}
49870 +
49871 +EXPORT_SYMBOL(gr_log_timechange);
49872 diff -urNp linux-2.6.39.4/grsecurity/grsec_tpe.c linux-2.6.39.4/grsecurity/grsec_tpe.c
49873 --- linux-2.6.39.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49874 +++ linux-2.6.39.4/grsecurity/grsec_tpe.c 2011-08-05 19:44:37.000000000 -0400
49875 @@ -0,0 +1,39 @@
49876 +#include <linux/kernel.h>
49877 +#include <linux/sched.h>
49878 +#include <linux/file.h>
49879 +#include <linux/fs.h>
49880 +#include <linux/grinternal.h>
49881 +
49882 +extern int gr_acl_tpe_check(void);
49883 +
49884 +int
49885 +gr_tpe_allow(const struct file *file)
49886 +{
49887 +#ifdef CONFIG_GRKERNSEC
49888 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49889 + const struct cred *cred = current_cred();
49890 +
49891 + if (cred->uid && ((grsec_enable_tpe &&
49892 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49893 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49894 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49895 +#else
49896 + in_group_p(grsec_tpe_gid)
49897 +#endif
49898 + ) || gr_acl_tpe_check()) &&
49899 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49900 + (inode->i_mode & S_IWOTH))))) {
49901 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49902 + return 0;
49903 + }
49904 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49905 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49906 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49907 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49908 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49909 + return 0;
49910 + }
49911 +#endif
49912 +#endif
49913 + return 1;
49914 +}
49915 diff -urNp linux-2.6.39.4/grsecurity/grsum.c linux-2.6.39.4/grsecurity/grsum.c
49916 --- linux-2.6.39.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
49917 +++ linux-2.6.39.4/grsecurity/grsum.c 2011-08-05 19:44:37.000000000 -0400
49918 @@ -0,0 +1,61 @@
49919 +#include <linux/err.h>
49920 +#include <linux/kernel.h>
49921 +#include <linux/sched.h>
49922 +#include <linux/mm.h>
49923 +#include <linux/scatterlist.h>
49924 +#include <linux/crypto.h>
49925 +#include <linux/gracl.h>
49926 +
49927 +
49928 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
49929 +#error "crypto and sha256 must be built into the kernel"
49930 +#endif
49931 +
49932 +int
49933 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
49934 +{
49935 + char *p;
49936 + struct crypto_hash *tfm;
49937 + struct hash_desc desc;
49938 + struct scatterlist sg;
49939 + unsigned char temp_sum[GR_SHA_LEN];
49940 + volatile int retval = 0;
49941 + volatile int dummy = 0;
49942 + unsigned int i;
49943 +
49944 + sg_init_table(&sg, 1);
49945 +
49946 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
49947 + if (IS_ERR(tfm)) {
49948 + /* should never happen, since sha256 should be built in */
49949 + return 1;
49950 + }
49951 +
49952 + desc.tfm = tfm;
49953 + desc.flags = 0;
49954 +
49955 + crypto_hash_init(&desc);
49956 +
49957 + p = salt;
49958 + sg_set_buf(&sg, p, GR_SALT_LEN);
49959 + crypto_hash_update(&desc, &sg, sg.length);
49960 +
49961 + p = entry->pw;
49962 + sg_set_buf(&sg, p, strlen(p));
49963 +
49964 + crypto_hash_update(&desc, &sg, sg.length);
49965 +
49966 + crypto_hash_final(&desc, temp_sum);
49967 +
49968 + memset(entry->pw, 0, GR_PW_LEN);
49969 +
49970 + for (i = 0; i < GR_SHA_LEN; i++)
49971 + if (sum[i] != temp_sum[i])
49972 + retval = 1;
49973 + else
49974 + dummy = 1; // waste a cycle
49975 +
49976 + crypto_free_hash(tfm);
49977 +
49978 + return retval;
49979 +}
49980 diff -urNp linux-2.6.39.4/grsecurity/Kconfig linux-2.6.39.4/grsecurity/Kconfig
49981 --- linux-2.6.39.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
49982 +++ linux-2.6.39.4/grsecurity/Kconfig 2011-08-05 19:44:37.000000000 -0400
49983 @@ -0,0 +1,1048 @@
49984 +#
49985 +# grsecurity configuration
49986 +#
49987 +
49988 +menu "Grsecurity"
49989 +
49990 +config GRKERNSEC
49991 + bool "Grsecurity"
49992 + select CRYPTO
49993 + select CRYPTO_SHA256
49994 + help
49995 + If you say Y here, you will be able to configure many features
49996 + that will enhance the security of your system. It is highly
49997 + recommended that you say Y here and read through the help
49998 + for each option so that you fully understand the features and
49999 + can evaluate their usefulness for your machine.
50000 +
50001 +choice
50002 + prompt "Security Level"
50003 + depends on GRKERNSEC
50004 + default GRKERNSEC_CUSTOM
50005 +
50006 +config GRKERNSEC_LOW
50007 + bool "Low"
50008 + select GRKERNSEC_LINK
50009 + select GRKERNSEC_FIFO
50010 + select GRKERNSEC_EXECVE
50011 + select GRKERNSEC_RANDNET
50012 + select GRKERNSEC_DMESG
50013 + select GRKERNSEC_CHROOT
50014 + select GRKERNSEC_CHROOT_CHDIR
50015 +
50016 + help
50017 + If you choose this option, several of the grsecurity options will
50018 + be enabled that will give you greater protection against a number
50019 + of attacks, while assuring that none of your software will have any
50020 + conflicts with the additional security measures. If you run a lot
50021 + of unusual software, or you are having problems with the higher
50022 + security levels, you should say Y here. With this option, the
50023 + following features are enabled:
50024 +
50025 + - Linking restrictions
50026 + - FIFO restrictions
50027 + - Enforcing RLIMIT_NPROC on execve
50028 + - Restricted dmesg
50029 + - Enforced chdir("/") on chroot
50030 + - Runtime module disabling
50031 +
50032 +config GRKERNSEC_MEDIUM
50033 + bool "Medium"
50034 + select PAX
50035 + select PAX_EI_PAX
50036 + select PAX_PT_PAX_FLAGS
50037 + select PAX_HAVE_ACL_FLAGS
50038 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50039 + select GRKERNSEC_CHROOT
50040 + select GRKERNSEC_CHROOT_SYSCTL
50041 + select GRKERNSEC_LINK
50042 + select GRKERNSEC_FIFO
50043 + select GRKERNSEC_EXECVE
50044 + select GRKERNSEC_DMESG
50045 + select GRKERNSEC_RANDNET
50046 + select GRKERNSEC_FORKFAIL
50047 + select GRKERNSEC_TIME
50048 + select GRKERNSEC_SIGNAL
50049 + select GRKERNSEC_CHROOT
50050 + select GRKERNSEC_CHROOT_UNIX
50051 + select GRKERNSEC_CHROOT_MOUNT
50052 + select GRKERNSEC_CHROOT_PIVOT
50053 + select GRKERNSEC_CHROOT_DOUBLE
50054 + select GRKERNSEC_CHROOT_CHDIR
50055 + select GRKERNSEC_CHROOT_MKNOD
50056 + select GRKERNSEC_PROC
50057 + select GRKERNSEC_PROC_USERGROUP
50058 + select PAX_RANDUSTACK
50059 + select PAX_ASLR
50060 + select PAX_RANDMMAP
50061 + select PAX_REFCOUNT if (X86 || SPARC64)
50062 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
50063 +
50064 + help
50065 + If you say Y here, several features in addition to those included
50066 + in the low additional security level will be enabled. These
50067 + features provide even more security to your system, though in rare
50068 + cases they may be incompatible with very old or poorly written
50069 + software. If you enable this option, make sure that your auth
50070 + service (identd) is running as gid 1001. With this option,
50071 + the following features (in addition to those provided in the
50072 + low additional security level) will be enabled:
50073 +
50074 + - Failed fork logging
50075 + - Time change logging
50076 + - Signal logging
50077 + - Deny mounts in chroot
50078 + - Deny double chrooting
50079 + - Deny sysctl writes in chroot
50080 + - Deny mknod in chroot
50081 + - Deny access to abstract AF_UNIX sockets out of chroot
50082 + - Deny pivot_root in chroot
50083 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
50084 + - /proc restrictions with special GID set to 10 (usually wheel)
50085 + - Address Space Layout Randomization (ASLR)
50086 + - Prevent exploitation of most refcount overflows
50087 + - Bounds checking of copying between the kernel and userland
50088 +
50089 +config GRKERNSEC_HIGH
50090 + bool "High"
50091 + select GRKERNSEC_LINK
50092 + select GRKERNSEC_FIFO
50093 + select GRKERNSEC_EXECVE
50094 + select GRKERNSEC_DMESG
50095 + select GRKERNSEC_FORKFAIL
50096 + select GRKERNSEC_TIME
50097 + select GRKERNSEC_SIGNAL
50098 + select GRKERNSEC_CHROOT
50099 + select GRKERNSEC_CHROOT_SHMAT
50100 + select GRKERNSEC_CHROOT_UNIX
50101 + select GRKERNSEC_CHROOT_MOUNT
50102 + select GRKERNSEC_CHROOT_FCHDIR
50103 + select GRKERNSEC_CHROOT_PIVOT
50104 + select GRKERNSEC_CHROOT_DOUBLE
50105 + select GRKERNSEC_CHROOT_CHDIR
50106 + select GRKERNSEC_CHROOT_MKNOD
50107 + select GRKERNSEC_CHROOT_CAPS
50108 + select GRKERNSEC_CHROOT_SYSCTL
50109 + select GRKERNSEC_CHROOT_FINDTASK
50110 + select GRKERNSEC_SYSFS_RESTRICT
50111 + select GRKERNSEC_PROC
50112 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50113 + select GRKERNSEC_HIDESYM
50114 + select GRKERNSEC_BRUTE
50115 + select GRKERNSEC_PROC_USERGROUP
50116 + select GRKERNSEC_KMEM
50117 + select GRKERNSEC_RESLOG
50118 + select GRKERNSEC_RANDNET
50119 + select GRKERNSEC_PROC_ADD
50120 + select GRKERNSEC_CHROOT_CHMOD
50121 + select GRKERNSEC_CHROOT_NICE
50122 + select GRKERNSEC_AUDIT_MOUNT
50123 + select GRKERNSEC_MODHARDEN if (MODULES)
50124 + select GRKERNSEC_HARDEN_PTRACE
50125 + select GRKERNSEC_VM86 if (X86_32)
50126 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50127 + select PAX
50128 + select PAX_RANDUSTACK
50129 + select PAX_ASLR
50130 + select PAX_RANDMMAP
50131 + select PAX_NOEXEC
50132 + select PAX_MPROTECT
50133 + select PAX_EI_PAX
50134 + select PAX_PT_PAX_FLAGS
50135 + select PAX_HAVE_ACL_FLAGS
50136 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50137 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
50138 + select PAX_RANDKSTACK if (X86_TSC && X86)
50139 + select PAX_SEGMEXEC if (X86_32)
50140 + select PAX_PAGEEXEC
50141 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50142 + select PAX_EMUTRAMP if (PARISC)
50143 + select PAX_EMUSIGRT if (PARISC)
50144 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50145 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50146 + select PAX_REFCOUNT if (X86 || SPARC64)
50147 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50148 + help
50149 + If you say Y here, many of the features of grsecurity will be
50150 + enabled, which will protect you against many kinds of attacks
50151 + against your system. The heightened security comes at a cost
50152 + of an increased chance of incompatibilities with rare software
50153 + on your machine. Since this security level enables PaX, you should
50154 + view <http://pax.grsecurity.net> and read about the PaX
50155 + project. While you are there, download chpax and run it on
50156 + binaries that cause problems with PaX. Also remember that
50157 + since the /proc restrictions are enabled, you must run your
50158 + identd as gid 1001. This security level enables the following
50159 + features in addition to those listed in the low and medium
50160 + security levels:
50161 +
50162 + - Additional /proc restrictions
50163 + - Chmod restrictions in chroot
50164 + - No signals, ptrace, or viewing of processes outside of chroot
50165 + - Capability restrictions in chroot
50166 + - Deny fchdir out of chroot
50167 + - Priority restrictions in chroot
50168 + - Segmentation-based implementation of PaX
50169 + - Mprotect restrictions
50170 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50171 + - Kernel stack randomization
50172 + - Mount/unmount/remount logging
50173 + - Kernel symbol hiding
50174 + - Prevention of memory exhaustion-based exploits
50175 + - Hardening of module auto-loading
50176 + - Ptrace restrictions
50177 + - Restricted vm86 mode
50178 + - Restricted sysfs/debugfs
50179 + - Active kernel exploit response
50180 +
50181 +config GRKERNSEC_CUSTOM
50182 + bool "Custom"
50183 + help
50184 + If you say Y here, you will be able to configure every grsecurity
50185 + option, which allows you to enable many more features that aren't
50186 + covered in the basic security levels. These additional features
50187 + include TPE, socket restrictions, and the sysctl system for
50188 + grsecurity. It is advised that you read through the help for
50189 + each option to determine its usefulness in your situation.
50190 +
50191 +endchoice
50192 +
50193 +menu "Address Space Protection"
50194 +depends on GRKERNSEC
50195 +
50196 +config GRKERNSEC_KMEM
50197 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50198 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50199 + help
50200 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50201 + be written to via mmap or otherwise to modify the running kernel.
50202 + /dev/port will also not be allowed to be opened. If you have module
50203 + support disabled, enabling this will close up four ways that are
50204 + currently used to insert malicious code into the running kernel.
50205 + Even with all these features enabled, we still highly recommend that
50206 + you use the RBAC system, as it is still possible for an attacker to
50207 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50208 + If you are not using XFree86, you may be able to stop this additional
50209 + case by enabling the 'Disable privileged I/O' option. Though nothing
50210 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50211 + but only to video memory, which is the only writing we allow in this
50212 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50213 + not be allowed to mprotect it with PROT_WRITE later.
50214 + It is highly recommended that you say Y here if you meet all the
50215 + conditions above.
50216 +
50217 +config GRKERNSEC_VM86
50218 + bool "Restrict VM86 mode"
50219 + depends on X86_32
50220 +
50221 + help
50222 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50223 + make use of a special execution mode on 32bit x86 processors called
50224 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50225 + video cards and will still work with this option enabled. The purpose
50226 + of the option is to prevent exploitation of emulation errors in
50227 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50228 + Nearly all users should be able to enable this option.
50229 +
50230 +config GRKERNSEC_IO
50231 + bool "Disable privileged I/O"
50232 + depends on X86
50233 + select RTC_CLASS
50234 + select RTC_INTF_DEV
50235 + select RTC_DRV_CMOS
50236 +
50237 + help
50238 + If you say Y here, all ioperm and iopl calls will return an error.
50239 + Ioperm and iopl can be used to modify the running kernel.
50240 + Unfortunately, some programs need this access to operate properly,
50241 + the most notable of which are XFree86 and hwclock. hwclock can be
50242 + remedied by having RTC support in the kernel, so real-time
50243 + clock support is enabled if this option is enabled, to ensure
50244 + that hwclock operates correctly. XFree86 still will not
50245 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50246 + IF YOU USE XFree86. If you use XFree86 and you still want to
50247 + protect your kernel against modification, use the RBAC system.
50248 +
50249 +config GRKERNSEC_PROC_MEMMAP
50250 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50251 + default y if (PAX_NOEXEC || PAX_ASLR)
50252 + depends on PAX_NOEXEC || PAX_ASLR
50253 + help
50254 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50255 + give no information about the addresses of its mappings if
50256 + PaX features that rely on random addresses are enabled on the task.
50257 + If you use PaX it is greatly recommended that you say Y here as it
50258 + closes up a hole that makes the full ASLR useless for suid
50259 + binaries.
50260 +
50261 +config GRKERNSEC_BRUTE
50262 + bool "Deter exploit bruteforcing"
50263 + help
50264 + If you say Y here, attempts to bruteforce exploits against forking
50265 + daemons such as apache or sshd, as well as against suid/sgid binaries
50266 + will be deterred. When a child of a forking daemon is killed by PaX
50267 + or crashes due to an illegal instruction or other suspicious signal,
50268 + the parent process will be delayed 30 seconds upon every subsequent
50269 + fork until the administrator is able to assess the situation and
50270 + restart the daemon.
50271 + In the suid/sgid case, the attempt is logged, the user has all their
50272 + processes terminated, and they are prevented from executing any further
50273 + processes for 15 minutes.
50274 + It is recommended that you also enable signal logging in the auditing
50275 + section so that logs are generated when a process triggers a suspicious
50276 + signal.
50277 + If the sysctl option is enabled, a sysctl option with name
50278 + "deter_bruteforce" is created.
50279 +
50280 +
50281 +config GRKERNSEC_MODHARDEN
50282 + bool "Harden module auto-loading"
50283 + depends on MODULES
50284 + help
50285 + If you say Y here, module auto-loading in response to use of some
50286 + feature implemented by an unloaded module will be restricted to
50287 + root users. Enabling this option helps defend against attacks
50288 + by unprivileged users who abuse the auto-loading behavior to
50289 + cause a vulnerable module to load that is then exploited.
50290 +
50291 + If this option prevents a legitimate use of auto-loading for a
50292 + non-root user, the administrator can execute modprobe manually
50293 + with the exact name of the module mentioned in the alert log.
50294 + Alternatively, the administrator can add the module to the list
50295 + of modules loaded at boot by modifying init scripts.
50296 +
50297 + Modification of init scripts will most likely be needed on
50298 + Ubuntu servers with encrypted home directory support enabled,
50299 + as the first non-root user logging in will cause the ecb(aes),
50300 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50301 +
50302 +config GRKERNSEC_HIDESYM
50303 + bool "Hide kernel symbols"
50304 + help
50305 + If you say Y here, getting information on loaded modules, and
50306 + displaying all kernel symbols through a syscall will be restricted
50307 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50308 + /proc/kallsyms will be restricted to the root user. The RBAC
50309 + system can hide that entry even from root.
50310 +
50311 + This option also prevents leaking of kernel addresses through
50312 + several /proc entries.
50313 +
50314 + Note that this option is only effective provided the following
50315 + conditions are met:
50316 + 1) The kernel using grsecurity is not precompiled by some distribution
50317 + 2) You have also enabled GRKERNSEC_DMESG
50318 + 3) You are using the RBAC system and hiding other files such as your
50319 + kernel image and System.map. Alternatively, enabling this option
50320 + causes the permissions on /boot, /lib/modules, and the kernel
50321 + source directory to change at compile time to prevent
50322 + reading by non-root users.
50323 + If the above conditions are met, this option will aid in providing a
50324 + useful protection against local kernel exploitation of overflows
50325 + and arbitrary read/write vulnerabilities.
50326 +
50327 +config GRKERNSEC_KERN_LOCKOUT
50328 + bool "Active kernel exploit response"
50329 + depends on X86 || ARM || PPC || SPARC
50330 + help
50331 + If you say Y here, when a PaX alert is triggered due to suspicious
50332 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50333 + or an OOPs occurs due to bad memory accesses, instead of just
50334 + terminating the offending process (and potentially allowing
50335 + a subsequent exploit from the same user), we will take one of two
50336 + actions:
50337 + If the user was root, we will panic the system
50338 + If the user was non-root, we will log the attempt, terminate
50339 + all processes owned by the user, then prevent them from creating
50340 + any new processes until the system is restarted
50341 + This deters repeated kernel exploitation/bruteforcing attempts
50342 + and is useful for later forensics.
50343 +
50344 +endmenu
50345 +menu "Role Based Access Control Options"
50346 +depends on GRKERNSEC
50347 +
50348 +config GRKERNSEC_RBAC_DEBUG
50349 + bool
50350 +
50351 +config GRKERNSEC_NO_RBAC
50352 + bool "Disable RBAC system"
50353 + help
50354 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50355 + preventing the RBAC system from being enabled. You should only say Y
50356 + here if you have no intention of using the RBAC system, so as to prevent
50357 + an attacker with root access from misusing the RBAC system to hide files
50358 + and processes when loadable module support and /dev/[k]mem have been
50359 + locked down.
50360 +
50361 +config GRKERNSEC_ACL_HIDEKERN
50362 + bool "Hide kernel processes"
50363 + help
50364 + If you say Y here, all kernel threads will be hidden to all
50365 + processes but those whose subject has the "view hidden processes"
50366 + flag.
50367 +
50368 +config GRKERNSEC_ACL_MAXTRIES
50369 + int "Maximum tries before password lockout"
50370 + default 3
50371 + help
50372 + This option enforces the maximum number of times a user can attempt
50373 + to authorize themselves with the grsecurity RBAC system before being
50374 + denied the ability to attempt authorization again for a specified time.
50375 + The lower the number, the harder it will be to brute-force a password.
50376 +
50377 +config GRKERNSEC_ACL_TIMEOUT
50378 + int "Time to wait after max password tries, in seconds"
50379 + default 30
50380 + help
50381 + This option specifies the time the user must wait after attempting to
50382 + authorize to the RBAC system with the maximum number of invalid
50383 + passwords. The higher the number, the harder it will be to brute-force
50384 + a password.
50385 +
50386 +endmenu
50387 +menu "Filesystem Protections"
50388 +depends on GRKERNSEC
50389 +
50390 +config GRKERNSEC_PROC
50391 + bool "Proc restrictions"
50392 + help
50393 + If you say Y here, the permissions of the /proc filesystem
50394 + will be altered to enhance system security and privacy. You MUST
50395 + choose either a user only restriction or a user and group restriction.
50396 + Depending upon the option you choose, you can either restrict users to
50397 + see only the processes they themselves run, or choose a group that can
50398 + view all processes and files normally restricted to root if you choose
50399 + the "restrict to user only" option. NOTE: If you're running identd as
50400 + a non-root user, you will have to run it as the group you specify here.
50401 +
50402 +config GRKERNSEC_PROC_USER
50403 + bool "Restrict /proc to user only"
50404 + depends on GRKERNSEC_PROC
50405 + help
50406 + If you say Y here, non-root users will only be able to view their own
50407 + processes, and restricts them from viewing network-related information,
50408 + and viewing kernel symbol and module information.
50409 +
50410 +config GRKERNSEC_PROC_USERGROUP
50411 + bool "Allow special group"
50412 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50413 + help
50414 + If you say Y here, you will be able to select a group that will be
50415 + able to view all processes and network-related information. If you've
50416 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50417 + remain hidden. This option is useful if you want to run identd as
50418 + a non-root user.
50419 +
50420 +config GRKERNSEC_PROC_GID
50421 + int "GID for special group"
50422 + depends on GRKERNSEC_PROC_USERGROUP
50423 + default 1001
50424 +
50425 +config GRKERNSEC_PROC_ADD
50426 + bool "Additional restrictions"
50427 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50428 + help
50429 + If you say Y here, additional restrictions will be placed on
50430 + /proc that keep normal users from viewing device information and
50431 + slabinfo information that could be useful for exploits.
50432 +
50433 +config GRKERNSEC_LINK
50434 + bool "Linking restrictions"
50435 + help
50436 + If you say Y here, /tmp race exploits will be prevented, since users
50437 + will no longer be able to follow symlinks owned by other users in
50438 + world-writable +t directories (e.g. /tmp), unless the owner of the
50439 +	  symlink is the owner of the directory. Users will also not be
50440 + able to hardlink to files they do not own. If the sysctl option is
50441 + enabled, a sysctl option with name "linking_restrictions" is created.
50442 +
50443 +config GRKERNSEC_FIFO
50444 + bool "FIFO restrictions"
50445 + help
50446 + If you say Y here, users will not be able to write to FIFOs they don't
50447 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50448 +	  the FIFO is the same as the owner of the directory it's held in. If the sysctl
50449 + option is enabled, a sysctl option with name "fifo_restrictions" is
50450 + created.
50451 +
50452 +config GRKERNSEC_SYSFS_RESTRICT
50453 + bool "Sysfs/debugfs restriction"
50454 + depends on SYSFS
50455 + help
50456 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50457 + any filesystem normally mounted under it (e.g. debugfs) will only
50458 + be accessible by root. These filesystems generally provide access
50459 + to hardware and debug information that isn't appropriate for unprivileged
50460 + users of the system. Sysfs and debugfs have also become a large source
50461 + of new vulnerabilities, ranging from infoleaks to local compromise.
50462 + There has been very little oversight with an eye toward security involved
50463 + in adding new exporters of information to these filesystems, so their
50464 + use is discouraged.
50465 + This option is equivalent to a chmod 0700 of the mount paths.
50466 +
50467 +config GRKERNSEC_ROFS
50468 + bool "Runtime read-only mount protection"
50469 + help
50470 + If you say Y here, a sysctl option with name "romount_protect" will
50471 + be created. By setting this option to 1 at runtime, filesystems
50472 + will be protected in the following ways:
50473 + * No new writable mounts will be allowed
50474 + * Existing read-only mounts won't be able to be remounted read/write
50475 + * Write operations will be denied on all block devices
50476 + This option acts independently of grsec_lock: once it is set to 1,
50477 + it cannot be turned off. Therefore, please be mindful of the resulting
50478 + behavior if this option is enabled in an init script on a read-only
50479 + filesystem. This feature is mainly intended for secure embedded systems.
50480 +
50481 +config GRKERNSEC_CHROOT
50482 + bool "Chroot jail restrictions"
50483 + help
50484 + If you say Y here, you will be able to choose several options that will
50485 + make breaking out of a chrooted jail much more difficult. If you
50486 + encounter no software incompatibilities with the following options, it
50487 + is recommended that you enable each one.
50488 +
50489 +config GRKERNSEC_CHROOT_MOUNT
50490 + bool "Deny mounts"
50491 + depends on GRKERNSEC_CHROOT
50492 + help
50493 + If you say Y here, processes inside a chroot will not be able to
50494 + mount or remount filesystems. If the sysctl option is enabled, a
50495 + sysctl option with name "chroot_deny_mount" is created.
50496 +
50497 +config GRKERNSEC_CHROOT_DOUBLE
50498 + bool "Deny double-chroots"
50499 + depends on GRKERNSEC_CHROOT
50500 + help
50501 + If you say Y here, processes inside a chroot will not be able to chroot
50502 + again outside the chroot. This is a widely used method of breaking
50503 + out of a chroot jail and should not be allowed. If the sysctl
50504 + option is enabled, a sysctl option with name
50505 + "chroot_deny_chroot" is created.
50506 +
50507 +config GRKERNSEC_CHROOT_PIVOT
50508 + bool "Deny pivot_root in chroot"
50509 + depends on GRKERNSEC_CHROOT
50510 + help
50511 + If you say Y here, processes inside a chroot will not be able to use
50512 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50513 + works similar to chroot in that it changes the root filesystem. This
50514 + function could be misused in a chrooted process to attempt to break out
50515 + of the chroot, and therefore should not be allowed. If the sysctl
50516 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50517 + created.
50518 +
50519 +config GRKERNSEC_CHROOT_CHDIR
50520 + bool "Enforce chdir(\"/\") on all chroots"
50521 + depends on GRKERNSEC_CHROOT
50522 + help
50523 + If you say Y here, the current working directory of all newly-chrooted
50524 +	  applications will be set to the root directory of the chroot.
50525 + The man page on chroot(2) states:
50526 + Note that this call does not change the current working
50527 + directory, so that `.' can be outside the tree rooted at
50528 + `/'. In particular, the super-user can escape from a
50529 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50530 +
50531 + It is recommended that you say Y here, since it's not known to break
50532 + any software. If the sysctl option is enabled, a sysctl option with
50533 + name "chroot_enforce_chdir" is created.
50534 +
50535 +config GRKERNSEC_CHROOT_CHMOD
50536 + bool "Deny (f)chmod +s"
50537 + depends on GRKERNSEC_CHROOT
50538 + help
50539 + If you say Y here, processes inside a chroot will not be able to chmod
50540 + or fchmod files to make them have suid or sgid bits. This protects
50541 + against another published method of breaking a chroot. If the sysctl
50542 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50543 + created.
50544 +
50545 +config GRKERNSEC_CHROOT_FCHDIR
50546 + bool "Deny fchdir out of chroot"
50547 + depends on GRKERNSEC_CHROOT
50548 + help
50549 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50550 + to a file descriptor of the chrooting process that points to a directory
50551 + outside the filesystem will be stopped. If the sysctl option
50552 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50553 +
50554 +config GRKERNSEC_CHROOT_MKNOD
50555 + bool "Deny mknod"
50556 + depends on GRKERNSEC_CHROOT
50557 + help
50558 + If you say Y here, processes inside a chroot will not be allowed to
50559 + mknod. The problem with using mknod inside a chroot is that it
50560 + would allow an attacker to create a device entry that is the same
50561 + as one on the physical root of your system, which could range from
50562 + anything from the console device to a device for your harddrive (which
50563 + they could then use to wipe the drive or steal data). It is recommended
50564 + that you say Y here, unless you run into software incompatibilities.
50565 + If the sysctl option is enabled, a sysctl option with name
50566 + "chroot_deny_mknod" is created.
50567 +
50568 +config GRKERNSEC_CHROOT_SHMAT
50569 + bool "Deny shmat() out of chroot"
50570 + depends on GRKERNSEC_CHROOT
50571 + help
50572 + If you say Y here, processes inside a chroot will not be able to attach
50573 + to shared memory segments that were created outside of the chroot jail.
50574 + It is recommended that you say Y here. If the sysctl option is enabled,
50575 + a sysctl option with name "chroot_deny_shmat" is created.
50576 +
50577 +config GRKERNSEC_CHROOT_UNIX
50578 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50579 + depends on GRKERNSEC_CHROOT
50580 + help
50581 + If you say Y here, processes inside a chroot will not be able to
50582 + connect to abstract (meaning not belonging to a filesystem) Unix
50583 + domain sockets that were bound outside of a chroot. It is recommended
50584 + that you say Y here. If the sysctl option is enabled, a sysctl option
50585 + with name "chroot_deny_unix" is created.
50586 +
50587 +config GRKERNSEC_CHROOT_FINDTASK
50588 + bool "Protect outside processes"
50589 + depends on GRKERNSEC_CHROOT
50590 + help
50591 + If you say Y here, processes inside a chroot will not be able to
50592 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50593 + getsid, or view any process outside of the chroot. If the sysctl
50594 + option is enabled, a sysctl option with name "chroot_findtask" is
50595 + created.
50596 +
50597 +config GRKERNSEC_CHROOT_NICE
50598 + bool "Restrict priority changes"
50599 + depends on GRKERNSEC_CHROOT
50600 + help
50601 + If you say Y here, processes inside a chroot will not be able to raise
50602 + the priority of processes in the chroot, or alter the priority of
50603 + processes outside the chroot. This provides more security than simply
50604 + removing CAP_SYS_NICE from the process' capability set. If the
50605 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50606 + is created.
50607 +
50608 +config GRKERNSEC_CHROOT_SYSCTL
50609 + bool "Deny sysctl writes"
50610 + depends on GRKERNSEC_CHROOT
50611 + help
50612 + If you say Y here, an attacker in a chroot will not be able to
50613 + write to sysctl entries, either by sysctl(2) or through a /proc
50614 + interface. It is strongly recommended that you say Y here. If the
50615 + sysctl option is enabled, a sysctl option with name
50616 + "chroot_deny_sysctl" is created.
50617 +
50618 +config GRKERNSEC_CHROOT_CAPS
50619 + bool "Capability restrictions"
50620 + depends on GRKERNSEC_CHROOT
50621 + help
50622 + If you say Y here, the capabilities on all root processes within a
50623 + chroot jail will be lowered to stop module insertion, raw i/o,
50624 + system and net admin tasks, rebooting the system, modifying immutable
50625 + files, modifying IPC owned by another, and changing the system time.
50626 + This is left an option because it can break some apps. Disable this
50627 + if your chrooted apps are having problems performing those kinds of
50628 + tasks. If the sysctl option is enabled, a sysctl option with
50629 + name "chroot_caps" is created.
50630 +
50631 +endmenu
50632 +menu "Kernel Auditing"
50633 +depends on GRKERNSEC
50634 +
50635 +config GRKERNSEC_AUDIT_GROUP
50636 + bool "Single group for auditing"
50637 + help
50638 + If you say Y here, the exec, chdir, and (un)mount logging features
50639 + will only operate on a group you specify. This option is recommended
50640 + if you only want to watch certain users instead of having a large
50641 + amount of logs from the entire system. If the sysctl option is enabled,
50642 + a sysctl option with name "audit_group" is created.
50643 +
50644 +config GRKERNSEC_AUDIT_GID
50645 + int "GID for auditing"
50646 + depends on GRKERNSEC_AUDIT_GROUP
50647 + default 1007
50648 +
50649 +config GRKERNSEC_EXECLOG
50650 + bool "Exec logging"
50651 + help
50652 + If you say Y here, all execve() calls will be logged (since the
50653 + other exec*() calls are frontends to execve(), all execution
50654 + will be logged). Useful for shell-servers that like to keep track
50655 + of their users. If the sysctl option is enabled, a sysctl option with
50656 + name "exec_logging" is created.
50657 + WARNING: This option when enabled will produce a LOT of logs, especially
50658 + on an active system.
50659 +
50660 +config GRKERNSEC_RESLOG
50661 + bool "Resource logging"
50662 + help
50663 + If you say Y here, all attempts to overstep resource limits will
50664 + be logged with the resource name, the requested size, and the current
50665 + limit. It is highly recommended that you say Y here. If the sysctl
50666 + option is enabled, a sysctl option with name "resource_logging" is
50667 + created. If the RBAC system is enabled, the sysctl value is ignored.
50668 +
50669 +config GRKERNSEC_CHROOT_EXECLOG
50670 + bool "Log execs within chroot"
50671 + help
50672 + If you say Y here, all executions inside a chroot jail will be logged
50673 + to syslog. This can cause a large amount of logs if certain
50674 + applications (eg. djb's daemontools) are installed on the system, and
50675 + is therefore left as an option. If the sysctl option is enabled, a
50676 + sysctl option with name "chroot_execlog" is created.
50677 +
50678 +config GRKERNSEC_AUDIT_PTRACE
50679 + bool "Ptrace logging"
50680 + help
50681 + If you say Y here, all attempts to attach to a process via ptrace
50682 + will be logged. If the sysctl option is enabled, a sysctl option
50683 + with name "audit_ptrace" is created.
50684 +
50685 +config GRKERNSEC_AUDIT_CHDIR
50686 + bool "Chdir logging"
50687 + help
50688 + If you say Y here, all chdir() calls will be logged. If the sysctl
50689 + option is enabled, a sysctl option with name "audit_chdir" is created.
50690 +
50691 +config GRKERNSEC_AUDIT_MOUNT
50692 + bool "(Un)Mount logging"
50693 + help
50694 + If you say Y here, all mounts and unmounts will be logged. If the
50695 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50696 + created.
50697 +
50698 +config GRKERNSEC_SIGNAL
50699 + bool "Signal logging"
50700 + help
50701 + If you say Y here, certain important signals will be logged, such as
50702 + SIGSEGV, which will as a result inform you of when an error in a program
50703 + occurred, which in some cases could mean a possible exploit attempt.
50704 + If the sysctl option is enabled, a sysctl option with name
50705 + "signal_logging" is created.
50706 +
50707 +config GRKERNSEC_FORKFAIL
50708 + bool "Fork failure logging"
50709 + help
50710 + If you say Y here, all failed fork() attempts will be logged.
50711 + This could suggest a fork bomb, or someone attempting to overstep
50712 + their process limit. If the sysctl option is enabled, a sysctl option
50713 + with name "forkfail_logging" is created.
50714 +
50715 +config GRKERNSEC_TIME
50716 + bool "Time change logging"
50717 + help
50718 + If you say Y here, any changes of the system clock will be logged.
50719 + If the sysctl option is enabled, a sysctl option with name
50720 + "timechange_logging" is created.
50721 +
50722 +config GRKERNSEC_PROC_IPADDR
50723 + bool "/proc/<pid>/ipaddr support"
50724 + help
50725 + If you say Y here, a new entry will be added to each /proc/<pid>
50726 + directory that contains the IP address of the person using the task.
50727 + The IP is carried across local TCP and AF_UNIX stream sockets.
50728 + This information can be useful for IDS/IPSes to perform remote response
50729 + to a local attack. The entry is readable by only the owner of the
50730 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50731 + the RBAC system), and thus does not create privacy concerns.
50732 +
50733 +config GRKERNSEC_RWXMAP_LOG
50734 + bool 'Denied RWX mmap/mprotect logging'
50735 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50736 + help
50737 + If you say Y here, calls to mmap() and mprotect() with explicit
50738 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50739 + denied by the PAX_MPROTECT feature. If the sysctl option is
50740 + enabled, a sysctl option with name "rwxmap_logging" is created.
50741 +
50742 +config GRKERNSEC_AUDIT_TEXTREL
50743 + bool 'ELF text relocations logging (READ HELP)'
50744 + depends on PAX_MPROTECT
50745 + help
50746 + If you say Y here, text relocations will be logged with the filename
50747 + of the offending library or binary. The purpose of the feature is
50748 + to help Linux distribution developers get rid of libraries and
50749 + binaries that need text relocations which hinder the future progress
50750 + of PaX. Only Linux distribution developers should say Y here, and
50751 + never on a production machine, as this option creates an information
50752 + leak that could aid an attacker in defeating the randomization of
50753 + a single memory region. If the sysctl option is enabled, a sysctl
50754 + option with name "audit_textrel" is created.
50755 +
50756 +endmenu
50757 +
50758 +menu "Executable Protections"
50759 +depends on GRKERNSEC
50760 +
50761 +config GRKERNSEC_EXECVE
50762 + bool "Enforce RLIMIT_NPROC on execs"
50763 + help
50764 + If you say Y here, users with a resource limit on processes will
50765 + have the value checked during execve() calls. The current system
50766 + only checks the system limit during fork() calls. If the sysctl option
50767 + is enabled, a sysctl option with name "execve_limiting" is created.
50768 +
50769 +config GRKERNSEC_DMESG
50770 + bool "Dmesg(8) restriction"
50771 + help
50772 + If you say Y here, non-root users will not be able to use dmesg(8)
50773 + to view up to the last 4kb of messages in the kernel's log buffer.
50774 + The kernel's log buffer often contains kernel addresses and other
50775 + identifying information useful to an attacker in fingerprinting a
50776 + system for a targeted exploit.
50777 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50778 + created.
50779 +
50780 +config GRKERNSEC_HARDEN_PTRACE
50781 + bool "Deter ptrace-based process snooping"
50782 + help
50783 + If you say Y here, TTY sniffers and other malicious monitoring
50784 + programs implemented through ptrace will be defeated. If you
50785 + have been using the RBAC system, this option has already been
50786 + enabled for several years for all users, with the ability to make
50787 + fine-grained exceptions.
50788 +
50789 + This option only affects the ability of non-root users to ptrace
50790 + processes that are not a descendent of the ptracing process.
50791 + This means that strace ./binary and gdb ./binary will still work,
50792 + but attaching to arbitrary processes will not. If the sysctl
50793 + option is enabled, a sysctl option with name "harden_ptrace" is
50794 + created.
50795 +
50796 +config GRKERNSEC_TPE
50797 + bool "Trusted Path Execution (TPE)"
50798 + help
50799 + If you say Y here, you will be able to choose a gid to add to the
50800 + supplementary groups of users you want to mark as "untrusted."
50801 + These users will not be able to execute any files that are not in
50802 + root-owned directories writable only by root. If the sysctl option
50803 + is enabled, a sysctl option with name "tpe" is created.
50804 +
50805 +config GRKERNSEC_TPE_ALL
50806 + bool "Partially restrict all non-root users"
50807 + depends on GRKERNSEC_TPE
50808 + help
50809 + If you say Y here, all non-root users will be covered under
50810 + a weaker TPE restriction. This is separate from, and in addition to,
50811 + the main TPE options that you have selected elsewhere. Thus, if a
50812 + "trusted" GID is chosen, this restriction applies to even that GID.
50813 + Under this restriction, all non-root users will only be allowed to
50814 + execute files in directories they own that are not group or
50815 + world-writable, or in directories owned by root and writable only by
50816 + root. If the sysctl option is enabled, a sysctl option with name
50817 + "tpe_restrict_all" is created.
50818 +
50819 +config GRKERNSEC_TPE_INVERT
50820 + bool "Invert GID option"
50821 + depends on GRKERNSEC_TPE
50822 + help
50823 + If you say Y here, the group you specify in the TPE configuration will
50824 + decide what group TPE restrictions will be *disabled* for. This
50825 + option is useful if you want TPE restrictions to be applied to most
50826 + users on the system. If the sysctl option is enabled, a sysctl option
50827 + with name "tpe_invert" is created. Unlike other sysctl options, this
50828 + entry will default to on for backward-compatibility.
50829 +
50830 +config GRKERNSEC_TPE_GID
50831 + int "GID for untrusted users"
50832 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50833 + default 1005
50834 + help
50835 + Setting this GID determines what group TPE restrictions will be
50836 + *enabled* for. If the sysctl option is enabled, a sysctl option
50837 + with name "tpe_gid" is created.
50838 +
50839 +config GRKERNSEC_TPE_GID
50840 + int "GID for trusted users"
50841 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50842 + default 1005
50843 + help
50844 + Setting this GID determines what group TPE restrictions will be
50845 + *disabled* for. If the sysctl option is enabled, a sysctl option
50846 + with name "tpe_gid" is created.
50847 +
50848 +endmenu
50849 +menu "Network Protections"
50850 +depends on GRKERNSEC
50851 +
50852 +config GRKERNSEC_RANDNET
50853 + bool "Larger entropy pools"
50854 + help
50855 + If you say Y here, the entropy pools used for many features of Linux
50856 + and grsecurity will be doubled in size. Since several grsecurity
50857 + features use additional randomness, it is recommended that you say Y
50858 + here. Saying Y here has a similar effect as modifying
50859 + /proc/sys/kernel/random/poolsize.
50860 +
50861 +config GRKERNSEC_BLACKHOLE
50862 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50863 + help
50864 + If you say Y here, neither TCP resets nor ICMP
50865 + destination-unreachable packets will be sent in response to packets
50866 + sent to ports for which no associated listening process exists.
50867 + This feature supports both IPV4 and IPV6 and exempts the
50868 + loopback interface from blackholing. Enabling this feature
50869 + makes a host more resilient to DoS attacks and reduces network
50870 + visibility against scanners.
50871 +
50872 + The blackhole feature as-implemented is equivalent to the FreeBSD
50873 + blackhole feature, as it prevents RST responses to all packets, not
50874 + just SYNs. Under most application behavior this causes no
50875 + problems, but applications (like haproxy) may not close certain
50876 + connections in a way that cleanly terminates them on the remote
50877 + end, leaving the remote host in LAST_ACK state. Because of this
50878 + side-effect and to prevent intentional LAST_ACK DoSes, this
50879 + feature also adds automatic mitigation against such attacks.
50880 + The mitigation drastically reduces the amount of time a socket
50881 + can spend in LAST_ACK state. If you're using haproxy and not
50882 + all servers it connects to have this option enabled, consider
50883 + disabling this feature on the haproxy host.
50884 +
50885 + If the sysctl option is enabled, two sysctl options with names
50886 + "ip_blackhole" and "lastack_retries" will be created.
50887 + While "ip_blackhole" takes the standard zero/non-zero on/off
50888 + toggle, "lastack_retries" uses the same kinds of values as
50889 + "tcp_retries1" and "tcp_retries2". The default value of 4
50890 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50891 + state.
50892 +
50893 +config GRKERNSEC_SOCKET
50894 + bool "Socket restrictions"
50895 + help
50896 + If you say Y here, you will be able to choose from several options.
50897 + If you assign a GID on your system and add it to the supplementary
50898 + groups of users you want to restrict socket access to, this patch
50899 + will perform up to three things, based on the option(s) you choose.
50900 +
50901 +config GRKERNSEC_SOCKET_ALL
50902 + bool "Deny any sockets to group"
50903 + depends on GRKERNSEC_SOCKET
50904 + help
50905 + If you say Y here, you will be able to choose a GID whose users will
50906 + be unable to connect to other hosts from your machine or run server
50907 + applications from your machine. If the sysctl option is enabled, a
50908 + sysctl option with name "socket_all" is created.
50909 +
50910 +config GRKERNSEC_SOCKET_ALL_GID
50911 + int "GID to deny all sockets for"
50912 + depends on GRKERNSEC_SOCKET_ALL
50913 + default 1004
50914 + help
50915 + Here you can choose the GID to disable socket access for. Remember to
50916 + add the users you want socket access disabled for to the GID
50917 + specified here. If the sysctl option is enabled, a sysctl option
50918 + with name "socket_all_gid" is created.
50919 +
50920 +config GRKERNSEC_SOCKET_CLIENT
50921 + bool "Deny client sockets to group"
50922 + depends on GRKERNSEC_SOCKET
50923 + help
50924 + If you say Y here, you will be able to choose a GID whose users will
50925 + be unable to connect to other hosts from your machine, but will be
50926 + able to run servers. If this option is enabled, all users in the group
50927 + you specify will have to use passive mode when initiating ftp transfers
50928 + from the shell on your machine. If the sysctl option is enabled, a
50929 + sysctl option with name "socket_client" is created.
50930 +
50931 +config GRKERNSEC_SOCKET_CLIENT_GID
50932 + int "GID to deny client sockets for"
50933 + depends on GRKERNSEC_SOCKET_CLIENT
50934 + default 1003
50935 + help
50936 + Here you can choose the GID to disable client socket access for.
50937 + Remember to add the users you want client socket access disabled for to
50938 + the GID specified here. If the sysctl option is enabled, a sysctl
50939 + option with name "socket_client_gid" is created.
50940 +
50941 +config GRKERNSEC_SOCKET_SERVER
50942 + bool "Deny server sockets to group"
50943 + depends on GRKERNSEC_SOCKET
50944 + help
50945 + If you say Y here, you will be able to choose a GID whose users will
50946 + be unable to run server applications from your machine. If the sysctl
50947 + option is enabled, a sysctl option with name "socket_server" is created.
50948 +
50949 +config GRKERNSEC_SOCKET_SERVER_GID
50950 + int "GID to deny server sockets for"
50951 + depends on GRKERNSEC_SOCKET_SERVER
50952 + default 1002
50953 + help
50954 + Here you can choose the GID to disable server socket access for.
50955 + Remember to add the users you want server socket access disabled for to
50956 + the GID specified here. If the sysctl option is enabled, a sysctl
50957 + option with name "socket_server_gid" is created.
50958 +
50959 +endmenu
50960 +menu "Sysctl support"
50961 +depends on GRKERNSEC && SYSCTL
50962 +
50963 +config GRKERNSEC_SYSCTL
50964 + bool "Sysctl support"
50965 + help
50966 + If you say Y here, you will be able to change the options that
50967 + grsecurity runs with at bootup, without having to recompile your
50968 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50969 + to enable (1) or disable (0) various features. All the sysctl entries
50970 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50971 + All features enabled in the kernel configuration are disabled at boot
50972 + if you do not say Y to the "Turn on features by default" option.
50973 + All options should be set at startup, and the grsec_lock entry should
50974 + be set to a non-zero value after all the options are set.
50975 + *THIS IS EXTREMELY IMPORTANT*
50976 +
50977 +config GRKERNSEC_SYSCTL_DISTRO
50978 + bool "Extra sysctl support for distro makers (READ HELP)"
50979 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50980 + help
50981 + If you say Y here, additional sysctl options will be created
50982 + for features that affect processes running as root. Therefore,
50983 + it is critical when using this option that the grsec_lock entry be
50984 + enabled after boot. Only distros with prebuilt kernel packages
50985 + with this option enabled that can ensure grsec_lock is enabled
50986 + after boot should use this option.
50987 + *Failure to set grsec_lock after boot makes all grsec features
50988 + this option covers useless*
50989 +
50990 + Currently this option creates the following sysctl entries:
50991 + "Disable Privileged I/O": "disable_priv_io"
50992 +
50993 +config GRKERNSEC_SYSCTL_ON
50994 + bool "Turn on features by default"
50995 + depends on GRKERNSEC_SYSCTL
50996 + help
50997 + If you say Y here, instead of having all features enabled in the
50998 + kernel configuration disabled at boot time, the features will be
50999 + enabled at boot time. It is recommended you say Y here unless
51000 + there is some reason you would want all sysctl-tunable features to
51001 + be disabled by default. As mentioned elsewhere, it is important
51002 + to enable the grsec_lock entry once you have finished modifying
51003 + the sysctl entries.
51004 +
51005 +endmenu
51006 +menu "Logging Options"
51007 +depends on GRKERNSEC
51008 +
51009 +config GRKERNSEC_FLOODTIME
51010 + int "Seconds in between log messages (minimum)"
51011 + default 10
51012 + help
51013 + This option allows you to enforce the number of seconds between
51014 + grsecurity log messages. The default should be suitable for most
51015 + people, however, if you choose to change it, choose a value small enough
51016 + to allow informative logs to be produced, but large enough to
51017 + prevent flooding.
51018 +
51019 +config GRKERNSEC_FLOODBURST
51020 + int "Number of messages in a burst (maximum)"
51021 + default 4
51022 + help
51023 + This option allows you to choose the maximum number of messages allowed
51024 + within the flood time interval you chose in a separate option. The
51025 + default should be suitable for most people, however if you find that
51026 + many of your logs are being interpreted as flooding, you may want to
51027 + raise this value.
51028 +
51029 +endmenu
51030 +
51031 +endmenu
51032 diff -urNp linux-2.6.39.4/grsecurity/Makefile linux-2.6.39.4/grsecurity/Makefile
51033 --- linux-2.6.39.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
51034 +++ linux-2.6.39.4/grsecurity/Makefile 2011-08-05 19:44:37.000000000 -0400
51035 @@ -0,0 +1,33 @@
51036 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51037 +# during 2001-2009 it has been completely redesigned by Brad Spengler
51038 +# into an RBAC system
51039 +#
51040 +# All code in this directory and various hooks inserted throughout the kernel
51041 +# are copyright Brad Spengler - Open Source Security, Inc., and released
51042 +# under the GPL v2 or higher
51043 +
51044 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51045 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
51046 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51047 +
51048 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51049 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51050 + gracl_learn.o grsec_log.o
51051 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51052 +
51053 +ifdef CONFIG_NET
51054 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51055 +endif
51056 +
51057 +ifndef CONFIG_GRKERNSEC
51058 +obj-y += grsec_disabled.o
51059 +endif
51060 +
51061 +ifdef CONFIG_GRKERNSEC_HIDESYM
51062 +extra-y := grsec_hidesym.o
51063 +$(obj)/grsec_hidesym.o:
51064 + @-chmod -f 500 /boot
51065 + @-chmod -f 500 /lib/modules
51066 + @-chmod -f 700 .
51067 + @echo ' grsec: protected kernel image paths'
51068 +endif
51069 diff -urNp linux-2.6.39.4/include/acpi/acpi_bus.h linux-2.6.39.4/include/acpi/acpi_bus.h
51070 --- linux-2.6.39.4/include/acpi/acpi_bus.h 2011-05-19 00:06:34.000000000 -0400
51071 +++ linux-2.6.39.4/include/acpi/acpi_bus.h 2011-08-05 20:34:06.000000000 -0400
51072 @@ -107,7 +107,7 @@ struct acpi_device_ops {
51073 acpi_op_bind bind;
51074 acpi_op_unbind unbind;
51075 acpi_op_notify notify;
51076 -};
51077 +} __no_const;
51078
51079 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
51080
51081 diff -urNp linux-2.6.39.4/include/asm-generic/atomic-long.h linux-2.6.39.4/include/asm-generic/atomic-long.h
51082 --- linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-05-19 00:06:34.000000000 -0400
51083 +++ linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-08-05 20:34:06.000000000 -0400
51084 @@ -22,6 +22,12 @@
51085
51086 typedef atomic64_t atomic_long_t;
51087
51088 +#ifdef CONFIG_PAX_REFCOUNT
51089 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
51090 +#else
51091 +typedef atomic64_t atomic_long_unchecked_t;
51092 +#endif
51093 +
51094 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
51095
51096 static inline long atomic_long_read(atomic_long_t *l)
51097 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
51098 return (long)atomic64_read(v);
51099 }
51100
51101 +#ifdef CONFIG_PAX_REFCOUNT
51102 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51103 +{
51104 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51105 +
51106 + return (long)atomic64_read_unchecked(v);
51107 +}
51108 +#endif
51109 +
51110 static inline void atomic_long_set(atomic_long_t *l, long i)
51111 {
51112 atomic64_t *v = (atomic64_t *)l;
51113 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
51114 atomic64_set(v, i);
51115 }
51116
51117 +#ifdef CONFIG_PAX_REFCOUNT
51118 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51119 +{
51120 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51121 +
51122 + atomic64_set_unchecked(v, i);
51123 +}
51124 +#endif
51125 +
51126 static inline void atomic_long_inc(atomic_long_t *l)
51127 {
51128 atomic64_t *v = (atomic64_t *)l;
51129 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51130 atomic64_inc(v);
51131 }
51132
51133 +#ifdef CONFIG_PAX_REFCOUNT
51134 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51135 +{
51136 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51137 +
51138 + atomic64_inc_unchecked(v);
51139 +}
51140 +#endif
51141 +
51142 static inline void atomic_long_dec(atomic_long_t *l)
51143 {
51144 atomic64_t *v = (atomic64_t *)l;
51145 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51146 atomic64_dec(v);
51147 }
51148
51149 +#ifdef CONFIG_PAX_REFCOUNT
51150 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51151 +{
51152 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51153 +
51154 + atomic64_dec_unchecked(v);
51155 +}
51156 +#endif
51157 +
51158 static inline void atomic_long_add(long i, atomic_long_t *l)
51159 {
51160 atomic64_t *v = (atomic64_t *)l;
51161 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51162 atomic64_add(i, v);
51163 }
51164
51165 +#ifdef CONFIG_PAX_REFCOUNT
51166 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51167 +{
51168 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51169 +
51170 + atomic64_add_unchecked(i, v);
51171 +}
51172 +#endif
51173 +
51174 static inline void atomic_long_sub(long i, atomic_long_t *l)
51175 {
51176 atomic64_t *v = (atomic64_t *)l;
51177 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51178 atomic64_sub(i, v);
51179 }
51180
51181 +#ifdef CONFIG_PAX_REFCOUNT
51182 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51183 +{
51184 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51185 +
51186 + atomic64_sub_unchecked(i, v);
51187 +}
51188 +#endif
51189 +
51190 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51191 {
51192 atomic64_t *v = (atomic64_t *)l;
51193 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51194 return (long)atomic64_inc_return(v);
51195 }
51196
51197 +#ifdef CONFIG_PAX_REFCOUNT
51198 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51199 +{
51200 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51201 +
51202 + return (long)atomic64_inc_return_unchecked(v);
51203 +}
51204 +#endif
51205 +
51206 static inline long atomic_long_dec_return(atomic_long_t *l)
51207 {
51208 atomic64_t *v = (atomic64_t *)l;
51209 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51210
51211 typedef atomic_t atomic_long_t;
51212
51213 +#ifdef CONFIG_PAX_REFCOUNT
51214 +typedef atomic_unchecked_t atomic_long_unchecked_t;
51215 +#else
51216 +typedef atomic_t atomic_long_unchecked_t;
51217 +#endif
51218 +
51219 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51220 static inline long atomic_long_read(atomic_long_t *l)
51221 {
51222 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51223 return (long)atomic_read(v);
51224 }
51225
51226 +#ifdef CONFIG_PAX_REFCOUNT
51227 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51228 +{
51229 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51230 +
51231 + return (long)atomic_read_unchecked(v);
51232 +}
51233 +#endif
51234 +
51235 static inline void atomic_long_set(atomic_long_t *l, long i)
51236 {
51237 atomic_t *v = (atomic_t *)l;
51238 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51239 atomic_set(v, i);
51240 }
51241
51242 +#ifdef CONFIG_PAX_REFCOUNT
51243 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51244 +{
51245 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51246 +
51247 + atomic_set_unchecked(v, i);
51248 +}
51249 +#endif
51250 +
51251 static inline void atomic_long_inc(atomic_long_t *l)
51252 {
51253 atomic_t *v = (atomic_t *)l;
51254 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51255 atomic_inc(v);
51256 }
51257
51258 +#ifdef CONFIG_PAX_REFCOUNT
51259 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51260 +{
51261 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51262 +
51263 + atomic_inc_unchecked(v);
51264 +}
51265 +#endif
51266 +
51267 static inline void atomic_long_dec(atomic_long_t *l)
51268 {
51269 atomic_t *v = (atomic_t *)l;
51270 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51271 atomic_dec(v);
51272 }
51273
51274 +#ifdef CONFIG_PAX_REFCOUNT
51275 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51276 +{
51277 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51278 +
51279 + atomic_dec_unchecked(v);
51280 +}
51281 +#endif
51282 +
51283 static inline void atomic_long_add(long i, atomic_long_t *l)
51284 {
51285 atomic_t *v = (atomic_t *)l;
51286 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51287 atomic_add(i, v);
51288 }
51289
51290 +#ifdef CONFIG_PAX_REFCOUNT
51291 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51292 +{
51293 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51294 +
51295 + atomic_add_unchecked(i, v);
51296 +}
51297 +#endif
51298 +
51299 static inline void atomic_long_sub(long i, atomic_long_t *l)
51300 {
51301 atomic_t *v = (atomic_t *)l;
51302 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51303 atomic_sub(i, v);
51304 }
51305
51306 +#ifdef CONFIG_PAX_REFCOUNT
51307 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51308 +{
51309 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51310 +
51311 + atomic_sub_unchecked(i, v);
51312 +}
51313 +#endif
51314 +
51315 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51316 {
51317 atomic_t *v = (atomic_t *)l;
51318 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51319 return (long)atomic_inc_return(v);
51320 }
51321
51322 +#ifdef CONFIG_PAX_REFCOUNT
51323 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51324 +{
51325 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51326 +
51327 + return (long)atomic_inc_return_unchecked(v);
51328 +}
51329 +#endif
51330 +
51331 static inline long atomic_long_dec_return(atomic_long_t *l)
51332 {
51333 atomic_t *v = (atomic_t *)l;
51334 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51335
51336 #endif /* BITS_PER_LONG == 64 */
51337
51338 +#ifdef CONFIG_PAX_REFCOUNT
51339 +static inline void pax_refcount_needs_these_functions(void)
51340 +{
51341 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
51342 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51343 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51344 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51345 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51346 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51347 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51348 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51349 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51350 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51351 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51352 +
51353 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51354 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51355 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51356 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51357 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51358 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51359 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51360 +}
51361 +#else
51362 +#define atomic_read_unchecked(v) atomic_read(v)
51363 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51364 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51365 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51366 +#define atomic_inc_unchecked(v) atomic_inc(v)
51367 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51368 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51369 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51370 +#define atomic_dec_unchecked(v) atomic_dec(v)
51371 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51372 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51373 +
51374 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
51375 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51376 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51377 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51378 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51379 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51380 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51381 +#endif
51382 +
51383 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51384 diff -urNp linux-2.6.39.4/include/asm-generic/cache.h linux-2.6.39.4/include/asm-generic/cache.h
51385 --- linux-2.6.39.4/include/asm-generic/cache.h 2011-05-19 00:06:34.000000000 -0400
51386 +++ linux-2.6.39.4/include/asm-generic/cache.h 2011-08-05 19:44:37.000000000 -0400
51387 @@ -6,7 +6,7 @@
51388 * cache lines need to provide their own cache.h.
51389 */
51390
51391 -#define L1_CACHE_SHIFT 5
51392 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51393 +#define L1_CACHE_SHIFT 5UL
51394 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51395
51396 #endif /* __ASM_GENERIC_CACHE_H */
51397 diff -urNp linux-2.6.39.4/include/asm-generic/int-l64.h linux-2.6.39.4/include/asm-generic/int-l64.h
51398 --- linux-2.6.39.4/include/asm-generic/int-l64.h 2011-05-19 00:06:34.000000000 -0400
51399 +++ linux-2.6.39.4/include/asm-generic/int-l64.h 2011-08-05 19:44:37.000000000 -0400
51400 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51401 typedef signed long s64;
51402 typedef unsigned long u64;
51403
51404 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51405 +
51406 #define S8_C(x) x
51407 #define U8_C(x) x ## U
51408 #define S16_C(x) x
51409 diff -urNp linux-2.6.39.4/include/asm-generic/int-ll64.h linux-2.6.39.4/include/asm-generic/int-ll64.h
51410 --- linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-05-19 00:06:34.000000000 -0400
51411 +++ linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-08-05 19:44:37.000000000 -0400
51412 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51413 typedef signed long long s64;
51414 typedef unsigned long long u64;
51415
51416 +typedef unsigned long long intoverflow_t;
51417 +
51418 #define S8_C(x) x
51419 #define U8_C(x) x ## U
51420 #define S16_C(x) x
51421 diff -urNp linux-2.6.39.4/include/asm-generic/kmap_types.h linux-2.6.39.4/include/asm-generic/kmap_types.h
51422 --- linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
51423 +++ linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-08-05 19:44:37.000000000 -0400
51424 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51425 KMAP_D(17) KM_NMI,
51426 KMAP_D(18) KM_NMI_PTE,
51427 KMAP_D(19) KM_KDB,
51428 +KMAP_D(20) KM_CLEARPAGE,
51429 /*
51430 * Remember to update debug_kmap_atomic() when adding new kmap types!
51431 */
51432 -KMAP_D(20) KM_TYPE_NR
51433 +KMAP_D(21) KM_TYPE_NR
51434 };
51435
51436 #undef KMAP_D
51437 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable.h linux-2.6.39.4/include/asm-generic/pgtable.h
51438 --- linux-2.6.39.4/include/asm-generic/pgtable.h 2011-05-19 00:06:34.000000000 -0400
51439 +++ linux-2.6.39.4/include/asm-generic/pgtable.h 2011-08-05 19:44:37.000000000 -0400
51440 @@ -447,6 +447,14 @@ static inline int pmd_write(pmd_t pmd)
51441 #endif /* __HAVE_ARCH_PMD_WRITE */
51442 #endif
51443
51444 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51445 +static inline unsigned long pax_open_kernel(void) { return 0; }
51446 +#endif
51447 +
51448 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51449 +static inline unsigned long pax_close_kernel(void) { return 0; }
51450 +#endif
51451 +
51452 #endif /* !__ASSEMBLY__ */
51453
51454 #endif /* _ASM_GENERIC_PGTABLE_H */
51455 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h
51456 --- linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-05-19 00:06:34.000000000 -0400
51457 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-08-05 19:44:37.000000000 -0400
51458 @@ -1,14 +1,19 @@
51459 #ifndef _PGTABLE_NOPMD_H
51460 #define _PGTABLE_NOPMD_H
51461
51462 -#ifndef __ASSEMBLY__
51463 -
51464 #include <asm-generic/pgtable-nopud.h>
51465
51466 -struct mm_struct;
51467 -
51468 #define __PAGETABLE_PMD_FOLDED
51469
51470 +#define PMD_SHIFT PUD_SHIFT
51471 +#define PTRS_PER_PMD 1
51472 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51473 +#define PMD_MASK (~(PMD_SIZE-1))
51474 +
51475 +#ifndef __ASSEMBLY__
51476 +
51477 +struct mm_struct;
51478 +
51479 /*
51480 * Having the pmd type consist of a pud gets the size right, and allows
51481 * us to conceptually access the pud entry that this pmd is folded into
51482 @@ -16,11 +21,6 @@ struct mm_struct;
51483 */
51484 typedef struct { pud_t pud; } pmd_t;
51485
51486 -#define PMD_SHIFT PUD_SHIFT
51487 -#define PTRS_PER_PMD 1
51488 -#define PMD_SIZE (1UL << PMD_SHIFT)
51489 -#define PMD_MASK (~(PMD_SIZE-1))
51490 -
51491 /*
51492 * The "pud_xxx()" functions here are trivial for a folded two-level
51493 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51494 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopud.h linux-2.6.39.4/include/asm-generic/pgtable-nopud.h
51495 --- linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-05-19 00:06:34.000000000 -0400
51496 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-08-05 19:44:37.000000000 -0400
51497 @@ -1,10 +1,15 @@
51498 #ifndef _PGTABLE_NOPUD_H
51499 #define _PGTABLE_NOPUD_H
51500
51501 -#ifndef __ASSEMBLY__
51502 -
51503 #define __PAGETABLE_PUD_FOLDED
51504
51505 +#define PUD_SHIFT PGDIR_SHIFT
51506 +#define PTRS_PER_PUD 1
51507 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51508 +#define PUD_MASK (~(PUD_SIZE-1))
51509 +
51510 +#ifndef __ASSEMBLY__
51511 +
51512 /*
51513 * Having the pud type consist of a pgd gets the size right, and allows
51514 * us to conceptually access the pgd entry that this pud is folded into
51515 @@ -12,11 +17,6 @@
51516 */
51517 typedef struct { pgd_t pgd; } pud_t;
51518
51519 -#define PUD_SHIFT PGDIR_SHIFT
51520 -#define PTRS_PER_PUD 1
51521 -#define PUD_SIZE (1UL << PUD_SHIFT)
51522 -#define PUD_MASK (~(PUD_SIZE-1))
51523 -
51524 /*
51525 * The "pgd_xxx()" functions here are trivial for a folded two-level
51526 * setup: the pud is never bad, and a pud always exists (as it's folded
51527 diff -urNp linux-2.6.39.4/include/asm-generic/vmlinux.lds.h linux-2.6.39.4/include/asm-generic/vmlinux.lds.h
51528 --- linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-05-19 00:06:34.000000000 -0400
51529 +++ linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-08-05 19:44:37.000000000 -0400
51530 @@ -213,6 +213,7 @@
51531 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51532 VMLINUX_SYMBOL(__start_rodata) = .; \
51533 *(.rodata) *(.rodata.*) \
51534 + *(.data..read_only) \
51535 *(__vermagic) /* Kernel version magic */ \
51536 . = ALIGN(8); \
51537 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51538 @@ -707,14 +708,15 @@
51539 * section in the linker script will go there too. @phdr should have
51540 * a leading colon.
51541 *
51542 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51543 + * Note that this macros defines per_cpu_load as an absolute symbol.
51544 * If there is no need to put the percpu section at a predetermined
51545 * address, use PERCPU().
51546 */
51547 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51548 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51549 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51550 + per_cpu_load = .; \
51551 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51552 - LOAD_OFFSET) { \
51553 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51554 VMLINUX_SYMBOL(__per_cpu_start) = .; \
51555 *(.data..percpu..first) \
51556 . = ALIGN(PAGE_SIZE); \
51557 @@ -726,7 +728,7 @@
51558 *(.data..percpu..shared_aligned) \
51559 VMLINUX_SYMBOL(__per_cpu_end) = .; \
51560 } phdr \
51561 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51562 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51563
51564 /**
51565 * PERCPU - define output section for percpu area, simple version
51566 diff -urNp linux-2.6.39.4/include/drm/drm_crtc_helper.h linux-2.6.39.4/include/drm/drm_crtc_helper.h
51567 --- linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-05-19 00:06:34.000000000 -0400
51568 +++ linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-08-05 20:34:06.000000000 -0400
51569 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51570
51571 /* disable crtc when not in use - more explicit than dpms off */
51572 void (*disable)(struct drm_crtc *crtc);
51573 -};
51574 +} __no_const;
51575
51576 struct drm_encoder_helper_funcs {
51577 void (*dpms)(struct drm_encoder *encoder, int mode);
51578 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51579 struct drm_connector *connector);
51580 /* disable encoder when not in use - more explicit than dpms off */
51581 void (*disable)(struct drm_encoder *encoder);
51582 -};
51583 +} __no_const;
51584
51585 struct drm_connector_helper_funcs {
51586 int (*get_modes)(struct drm_connector *connector);
51587 diff -urNp linux-2.6.39.4/include/drm/drmP.h linux-2.6.39.4/include/drm/drmP.h
51588 --- linux-2.6.39.4/include/drm/drmP.h 2011-05-19 00:06:34.000000000 -0400
51589 +++ linux-2.6.39.4/include/drm/drmP.h 2011-08-05 20:34:06.000000000 -0400
51590 @@ -73,6 +73,7 @@
51591 #include <linux/workqueue.h>
51592 #include <linux/poll.h>
51593 #include <asm/pgalloc.h>
51594 +#include <asm/local.h>
51595 #include "drm.h"
51596
51597 #include <linux/idr.h>
51598 @@ -1023,7 +1024,7 @@ struct drm_device {
51599
51600 /** \name Usage Counters */
51601 /*@{ */
51602 - int open_count; /**< Outstanding files open */
51603 + local_t open_count; /**< Outstanding files open */
51604 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51605 atomic_t vma_count; /**< Outstanding vma areas open */
51606 int buf_use; /**< Buffers in use -- cannot alloc */
51607 @@ -1034,7 +1035,7 @@ struct drm_device {
51608 /*@{ */
51609 unsigned long counters;
51610 enum drm_stat_type types[15];
51611 - atomic_t counts[15];
51612 + atomic_unchecked_t counts[15];
51613 /*@} */
51614
51615 struct list_head filelist;
51616 diff -urNp linux-2.6.39.4/include/drm/ttm/ttm_memory.h linux-2.6.39.4/include/drm/ttm/ttm_memory.h
51617 --- linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-05-19 00:06:34.000000000 -0400
51618 +++ linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-08-05 20:34:06.000000000 -0400
51619 @@ -47,7 +47,7 @@
51620
51621 struct ttm_mem_shrink {
51622 int (*do_shrink) (struct ttm_mem_shrink *);
51623 -};
51624 +} __no_const;
51625
51626 /**
51627 * struct ttm_mem_global - Global memory accounting structure.
51628 diff -urNp linux-2.6.39.4/include/linux/a.out.h linux-2.6.39.4/include/linux/a.out.h
51629 --- linux-2.6.39.4/include/linux/a.out.h 2011-05-19 00:06:34.000000000 -0400
51630 +++ linux-2.6.39.4/include/linux/a.out.h 2011-08-05 19:44:37.000000000 -0400
51631 @@ -39,6 +39,14 @@ enum machine_type {
51632 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51633 };
51634
51635 +/* Constants for the N_FLAGS field */
51636 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51637 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51638 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51639 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51640 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51641 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51642 +
51643 #if !defined (N_MAGIC)
51644 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51645 #endif
51646 diff -urNp linux-2.6.39.4/include/linux/atmdev.h linux-2.6.39.4/include/linux/atmdev.h
51647 --- linux-2.6.39.4/include/linux/atmdev.h 2011-05-19 00:06:34.000000000 -0400
51648 +++ linux-2.6.39.4/include/linux/atmdev.h 2011-08-05 19:44:37.000000000 -0400
51649 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51650 #endif
51651
51652 struct k_atm_aal_stats {
51653 -#define __HANDLE_ITEM(i) atomic_t i
51654 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51655 __AAL_STAT_ITEMS
51656 #undef __HANDLE_ITEM
51657 };
51658 diff -urNp linux-2.6.39.4/include/linux/binfmts.h linux-2.6.39.4/include/linux/binfmts.h
51659 --- linux-2.6.39.4/include/linux/binfmts.h 2011-05-19 00:06:34.000000000 -0400
51660 +++ linux-2.6.39.4/include/linux/binfmts.h 2011-08-05 19:44:37.000000000 -0400
51661 @@ -92,6 +92,7 @@ struct linux_binfmt {
51662 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51663 int (*load_shlib)(struct file *);
51664 int (*core_dump)(struct coredump_params *cprm);
51665 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51666 unsigned long min_coredump; /* minimal dump size */
51667 };
51668
51669 diff -urNp linux-2.6.39.4/include/linux/blkdev.h linux-2.6.39.4/include/linux/blkdev.h
51670 --- linux-2.6.39.4/include/linux/blkdev.h 2011-06-03 00:04:14.000000000 -0400
51671 +++ linux-2.6.39.4/include/linux/blkdev.h 2011-08-05 20:34:06.000000000 -0400
51672 @@ -1307,7 +1307,7 @@ struct block_device_operations {
51673 int (*getgeo)(struct block_device *, struct hd_geometry *);
51674 /* this callback is with swap_lock and sometimes page table lock held */
51675 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51676 - struct module *owner;
51677 + struct module * const owner;
51678 };
51679
51680 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51681 diff -urNp linux-2.6.39.4/include/linux/blktrace_api.h linux-2.6.39.4/include/linux/blktrace_api.h
51682 --- linux-2.6.39.4/include/linux/blktrace_api.h 2011-05-19 00:06:34.000000000 -0400
51683 +++ linux-2.6.39.4/include/linux/blktrace_api.h 2011-08-05 19:44:37.000000000 -0400
51684 @@ -161,7 +161,7 @@ struct blk_trace {
51685 struct dentry *dir;
51686 struct dentry *dropped_file;
51687 struct dentry *msg_file;
51688 - atomic_t dropped;
51689 + atomic_unchecked_t dropped;
51690 };
51691
51692 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51693 diff -urNp linux-2.6.39.4/include/linux/byteorder/little_endian.h linux-2.6.39.4/include/linux/byteorder/little_endian.h
51694 --- linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-05-19 00:06:34.000000000 -0400
51695 +++ linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-08-05 19:44:37.000000000 -0400
51696 @@ -42,51 +42,51 @@
51697
51698 static inline __le64 __cpu_to_le64p(const __u64 *p)
51699 {
51700 - return (__force __le64)*p;
51701 + return (__force const __le64)*p;
51702 }
51703 static inline __u64 __le64_to_cpup(const __le64 *p)
51704 {
51705 - return (__force __u64)*p;
51706 + return (__force const __u64)*p;
51707 }
51708 static inline __le32 __cpu_to_le32p(const __u32 *p)
51709 {
51710 - return (__force __le32)*p;
51711 + return (__force const __le32)*p;
51712 }
51713 static inline __u32 __le32_to_cpup(const __le32 *p)
51714 {
51715 - return (__force __u32)*p;
51716 + return (__force const __u32)*p;
51717 }
51718 static inline __le16 __cpu_to_le16p(const __u16 *p)
51719 {
51720 - return (__force __le16)*p;
51721 + return (__force const __le16)*p;
51722 }
51723 static inline __u16 __le16_to_cpup(const __le16 *p)
51724 {
51725 - return (__force __u16)*p;
51726 + return (__force const __u16)*p;
51727 }
51728 static inline __be64 __cpu_to_be64p(const __u64 *p)
51729 {
51730 - return (__force __be64)__swab64p(p);
51731 + return (__force const __be64)__swab64p(p);
51732 }
51733 static inline __u64 __be64_to_cpup(const __be64 *p)
51734 {
51735 - return __swab64p((__u64 *)p);
51736 + return __swab64p((const __u64 *)p);
51737 }
51738 static inline __be32 __cpu_to_be32p(const __u32 *p)
51739 {
51740 - return (__force __be32)__swab32p(p);
51741 + return (__force const __be32)__swab32p(p);
51742 }
51743 static inline __u32 __be32_to_cpup(const __be32 *p)
51744 {
51745 - return __swab32p((__u32 *)p);
51746 + return __swab32p((const __u32 *)p);
51747 }
51748 static inline __be16 __cpu_to_be16p(const __u16 *p)
51749 {
51750 - return (__force __be16)__swab16p(p);
51751 + return (__force const __be16)__swab16p(p);
51752 }
51753 static inline __u16 __be16_to_cpup(const __be16 *p)
51754 {
51755 - return __swab16p((__u16 *)p);
51756 + return __swab16p((const __u16 *)p);
51757 }
51758 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51759 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51760 diff -urNp linux-2.6.39.4/include/linux/cache.h linux-2.6.39.4/include/linux/cache.h
51761 --- linux-2.6.39.4/include/linux/cache.h 2011-05-19 00:06:34.000000000 -0400
51762 +++ linux-2.6.39.4/include/linux/cache.h 2011-08-05 19:44:37.000000000 -0400
51763 @@ -16,6 +16,10 @@
51764 #define __read_mostly
51765 #endif
51766
51767 +#ifndef __read_only
51768 +#define __read_only __read_mostly
51769 +#endif
51770 +
51771 #ifndef ____cacheline_aligned
51772 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51773 #endif
51774 diff -urNp linux-2.6.39.4/include/linux/capability.h linux-2.6.39.4/include/linux/capability.h
51775 --- linux-2.6.39.4/include/linux/capability.h 2011-05-19 00:06:34.000000000 -0400
51776 +++ linux-2.6.39.4/include/linux/capability.h 2011-08-05 19:44:37.000000000 -0400
51777 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51778 extern bool ns_capable(struct user_namespace *ns, int cap);
51779 extern bool task_ns_capable(struct task_struct *t, int cap);
51780 extern bool nsown_capable(int cap);
51781 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51782 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51783 +extern bool capable_nolog(int cap);
51784
51785 /* audit system wants to get cap info from files as well */
51786 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51787 diff -urNp linux-2.6.39.4/include/linux/compiler-gcc4.h linux-2.6.39.4/include/linux/compiler-gcc4.h
51788 --- linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-05-19 00:06:34.000000000 -0400
51789 +++ linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-08-05 20:34:06.000000000 -0400
51790 @@ -31,6 +31,9 @@
51791
51792
51793 #if __GNUC_MINOR__ >= 5
51794 +
51795 +#define __no_const __attribute__((no_const))
51796 +
51797 /*
51798 * Mark a position in code as unreachable. This can be used to
51799 * suppress control flow warnings after asm blocks that transfer
51800 @@ -46,6 +49,11 @@
51801 #define __noclone __attribute__((__noclone__))
51802
51803 #endif
51804 +
51805 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51806 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51807 +#define __bos0(ptr) __bos((ptr), 0)
51808 +#define __bos1(ptr) __bos((ptr), 1)
51809 #endif
51810
51811 #if __GNUC_MINOR__ > 0
51812 diff -urNp linux-2.6.39.4/include/linux/compiler.h linux-2.6.39.4/include/linux/compiler.h
51813 --- linux-2.6.39.4/include/linux/compiler.h 2011-05-19 00:06:34.000000000 -0400
51814 +++ linux-2.6.39.4/include/linux/compiler.h 2011-08-05 20:34:06.000000000 -0400
51815 @@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51816 # define __attribute_const__ /* unimplemented */
51817 #endif
51818
51819 +#ifndef __no_const
51820 +# define __no_const
51821 +#endif
51822 +
51823 /*
51824 * Tell gcc if a function is cold. The compiler will assume any path
51825 * directly leading to the call is unlikely.
51826 @@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51827 #define __cold
51828 #endif
51829
51830 +#ifndef __alloc_size
51831 +#define __alloc_size(...)
51832 +#endif
51833 +
51834 +#ifndef __bos
51835 +#define __bos(ptr, arg)
51836 +#endif
51837 +
51838 +#ifndef __bos0
51839 +#define __bos0(ptr)
51840 +#endif
51841 +
51842 +#ifndef __bos1
51843 +#define __bos1(ptr)
51844 +#endif
51845 +
51846 /* Simple shorthand for a section definition */
51847 #ifndef __section
51848 # define __section(S) __attribute__ ((__section__(#S)))
51849 @@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51850 * use is to mediate communication between process-level code and irq/NMI
51851 * handlers, all running on the same CPU.
51852 */
51853 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51854 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51855 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51856
51857 #endif /* __LINUX_COMPILER_H */
51858 diff -urNp linux-2.6.39.4/include/linux/cpuset.h linux-2.6.39.4/include/linux/cpuset.h
51859 --- linux-2.6.39.4/include/linux/cpuset.h 2011-05-19 00:06:34.000000000 -0400
51860 +++ linux-2.6.39.4/include/linux/cpuset.h 2011-08-05 19:44:37.000000000 -0400
51861 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51862 * nodemask.
51863 */
51864 smp_mb();
51865 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51866 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51867 }
51868
51869 static inline void set_mems_allowed(nodemask_t nodemask)
51870 diff -urNp linux-2.6.39.4/include/linux/crypto.h linux-2.6.39.4/include/linux/crypto.h
51871 --- linux-2.6.39.4/include/linux/crypto.h 2011-05-19 00:06:34.000000000 -0400
51872 +++ linux-2.6.39.4/include/linux/crypto.h 2011-08-05 20:34:06.000000000 -0400
51873 @@ -361,7 +361,7 @@ struct cipher_tfm {
51874 const u8 *key, unsigned int keylen);
51875 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51876 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51877 -};
51878 +} __no_const;
51879
51880 struct hash_tfm {
51881 int (*init)(struct hash_desc *desc);
51882 @@ -382,13 +382,13 @@ struct compress_tfm {
51883 int (*cot_decompress)(struct crypto_tfm *tfm,
51884 const u8 *src, unsigned int slen,
51885 u8 *dst, unsigned int *dlen);
51886 -};
51887 +} __no_const;
51888
51889 struct rng_tfm {
51890 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51891 unsigned int dlen);
51892 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51893 -};
51894 +} __no_const;
51895
51896 #define crt_ablkcipher crt_u.ablkcipher
51897 #define crt_aead crt_u.aead
51898 diff -urNp linux-2.6.39.4/include/linux/decompress/mm.h linux-2.6.39.4/include/linux/decompress/mm.h
51899 --- linux-2.6.39.4/include/linux/decompress/mm.h 2011-05-19 00:06:34.000000000 -0400
51900 +++ linux-2.6.39.4/include/linux/decompress/mm.h 2011-08-05 19:44:37.000000000 -0400
51901 @@ -77,7 +77,7 @@ static void free(void *where)
51902 * warnings when not needed (indeed large_malloc / large_free are not
51903 * needed by inflate */
51904
51905 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51906 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51907 #define free(a) kfree(a)
51908
51909 #define large_malloc(a) vmalloc(a)
51910 diff -urNp linux-2.6.39.4/include/linux/dma-mapping.h linux-2.6.39.4/include/linux/dma-mapping.h
51911 --- linux-2.6.39.4/include/linux/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
51912 +++ linux-2.6.39.4/include/linux/dma-mapping.h 2011-08-05 20:34:06.000000000 -0400
51913 @@ -49,7 +49,7 @@ struct dma_map_ops {
51914 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
51915 int (*dma_supported)(struct device *dev, u64 mask);
51916 int (*set_dma_mask)(struct device *dev, u64 mask);
51917 - int is_phys;
51918 + const int is_phys;
51919 };
51920
51921 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
51922 diff -urNp linux-2.6.39.4/include/linux/efi.h linux-2.6.39.4/include/linux/efi.h
51923 --- linux-2.6.39.4/include/linux/efi.h 2011-06-25 12:55:23.000000000 -0400
51924 +++ linux-2.6.39.4/include/linux/efi.h 2011-08-05 20:34:06.000000000 -0400
51925 @@ -409,7 +409,7 @@ struct efivar_operations {
51926 efi_get_variable_t *get_variable;
51927 efi_get_next_variable_t *get_next_variable;
51928 efi_set_variable_t *set_variable;
51929 -};
51930 +} __no_const;
51931
51932 struct efivars {
51933 /*
51934 diff -urNp linux-2.6.39.4/include/linux/elf.h linux-2.6.39.4/include/linux/elf.h
51935 --- linux-2.6.39.4/include/linux/elf.h 2011-05-19 00:06:34.000000000 -0400
51936 +++ linux-2.6.39.4/include/linux/elf.h 2011-08-05 19:44:37.000000000 -0400
51937 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
51938 #define PT_GNU_EH_FRAME 0x6474e550
51939
51940 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
51941 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
51942 +
51943 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
51944 +
51945 +/* Constants for the e_flags field */
51946 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51947 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
51948 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
51949 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
51950 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51951 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51952
51953 /*
51954 * Extended Numbering
51955 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
51956 #define DT_DEBUG 21
51957 #define DT_TEXTREL 22
51958 #define DT_JMPREL 23
51959 +#define DT_FLAGS 30
51960 + #define DF_TEXTREL 0x00000004
51961 #define DT_ENCODING 32
51962 #define OLD_DT_LOOS 0x60000000
51963 #define DT_LOOS 0x6000000d
51964 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
51965 #define PF_W 0x2
51966 #define PF_X 0x1
51967
51968 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
51969 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
51970 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
51971 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
51972 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
51973 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
51974 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
51975 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
51976 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
51977 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
51978 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
51979 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
51980 +
51981 typedef struct elf32_phdr{
51982 Elf32_Word p_type;
51983 Elf32_Off p_offset;
51984 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
51985 #define EI_OSABI 7
51986 #define EI_PAD 8
51987
51988 +#define EI_PAX 14
51989 +
51990 #define ELFMAG0 0x7f /* EI_MAG */
51991 #define ELFMAG1 'E'
51992 #define ELFMAG2 'L'
51993 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
51994 #define elf_note elf32_note
51995 #define elf_addr_t Elf32_Off
51996 #define Elf_Half Elf32_Half
51997 +#define elf_dyn Elf32_Dyn
51998
51999 #else
52000
52001 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
52002 #define elf_note elf64_note
52003 #define elf_addr_t Elf64_Off
52004 #define Elf_Half Elf64_Half
52005 +#define elf_dyn Elf64_Dyn
52006
52007 #endif
52008
52009 diff -urNp linux-2.6.39.4/include/linux/firewire.h linux-2.6.39.4/include/linux/firewire.h
52010 --- linux-2.6.39.4/include/linux/firewire.h 2011-05-19 00:06:34.000000000 -0400
52011 +++ linux-2.6.39.4/include/linux/firewire.h 2011-08-05 20:34:06.000000000 -0400
52012 @@ -429,7 +429,7 @@ struct fw_iso_context {
52013 union {
52014 fw_iso_callback_t sc;
52015 fw_iso_mc_callback_t mc;
52016 - } callback;
52017 + } __no_const callback;
52018 void *callback_data;
52019 };
52020
52021 diff -urNp linux-2.6.39.4/include/linux/fscache-cache.h linux-2.6.39.4/include/linux/fscache-cache.h
52022 --- linux-2.6.39.4/include/linux/fscache-cache.h 2011-05-19 00:06:34.000000000 -0400
52023 +++ linux-2.6.39.4/include/linux/fscache-cache.h 2011-08-05 19:44:37.000000000 -0400
52024 @@ -113,7 +113,7 @@ struct fscache_operation {
52025 #endif
52026 };
52027
52028 -extern atomic_t fscache_op_debug_id;
52029 +extern atomic_unchecked_t fscache_op_debug_id;
52030 extern void fscache_op_work_func(struct work_struct *work);
52031
52032 extern void fscache_enqueue_operation(struct fscache_operation *);
52033 @@ -133,7 +133,7 @@ static inline void fscache_operation_ini
52034 {
52035 INIT_WORK(&op->work, fscache_op_work_func);
52036 atomic_set(&op->usage, 1);
52037 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
52038 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52039 op->processor = processor;
52040 op->release = release;
52041 INIT_LIST_HEAD(&op->pend_link);
52042 diff -urNp linux-2.6.39.4/include/linux/fs.h linux-2.6.39.4/include/linux/fs.h
52043 --- linux-2.6.39.4/include/linux/fs.h 2011-05-19 00:06:34.000000000 -0400
52044 +++ linux-2.6.39.4/include/linux/fs.h 2011-08-05 20:34:06.000000000 -0400
52045 @@ -108,6 +108,11 @@ struct inodes_stat_t {
52046 /* File was opened by fanotify and shouldn't generate fanotify events */
52047 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
52048
52049 +/* Hack for grsec so as not to require read permission simply to execute
52050 + * a binary
52051 + */
52052 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
52053 +
52054 /*
52055 * The below are the various read and write types that we support. Some of
52056 * them include behavioral modifiers that send information down to the
52057 @@ -1535,7 +1540,7 @@ struct block_device_operations;
52058 * the big kernel lock held in all filesystems.
52059 */
52060 struct file_operations {
52061 - struct module *owner;
52062 + struct module * const owner;
52063 loff_t (*llseek) (struct file *, loff_t, int);
52064 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
52065 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
52066 @@ -1563,6 +1568,7 @@ struct file_operations {
52067 long (*fallocate)(struct file *file, int mode, loff_t offset,
52068 loff_t len);
52069 };
52070 +typedef struct file_operations __no_const file_operations_no_const;
52071
52072 #define IPERM_FLAG_RCU 0x0001
52073
52074 diff -urNp linux-2.6.39.4/include/linux/fs_struct.h linux-2.6.39.4/include/linux/fs_struct.h
52075 --- linux-2.6.39.4/include/linux/fs_struct.h 2011-05-19 00:06:34.000000000 -0400
52076 +++ linux-2.6.39.4/include/linux/fs_struct.h 2011-08-05 19:44:37.000000000 -0400
52077 @@ -6,7 +6,7 @@
52078 #include <linux/seqlock.h>
52079
52080 struct fs_struct {
52081 - int users;
52082 + atomic_t users;
52083 spinlock_t lock;
52084 seqcount_t seq;
52085 int umask;
52086 diff -urNp linux-2.6.39.4/include/linux/ftrace_event.h linux-2.6.39.4/include/linux/ftrace_event.h
52087 --- linux-2.6.39.4/include/linux/ftrace_event.h 2011-05-19 00:06:34.000000000 -0400
52088 +++ linux-2.6.39.4/include/linux/ftrace_event.h 2011-08-05 20:34:06.000000000 -0400
52089 @@ -84,7 +84,7 @@ struct trace_event_functions {
52090 trace_print_func raw;
52091 trace_print_func hex;
52092 trace_print_func binary;
52093 -};
52094 +} __no_const;
52095
52096 struct trace_event {
52097 struct hlist_node node;
52098 @@ -235,7 +235,7 @@ extern int trace_define_field(struct ftr
52099 extern int trace_add_event_call(struct ftrace_event_call *call);
52100 extern void trace_remove_event_call(struct ftrace_event_call *call);
52101
52102 -#define is_signed_type(type) (((type)(-1)) < 0)
52103 +#define is_signed_type(type) (((type)(-1)) < (type)1)
52104
52105 int trace_set_clr_event(const char *system, const char *event, int set);
52106
52107 diff -urNp linux-2.6.39.4/include/linux/genhd.h linux-2.6.39.4/include/linux/genhd.h
52108 --- linux-2.6.39.4/include/linux/genhd.h 2011-06-03 00:04:14.000000000 -0400
52109 +++ linux-2.6.39.4/include/linux/genhd.h 2011-08-05 19:44:37.000000000 -0400
52110 @@ -184,7 +184,7 @@ struct gendisk {
52111 struct kobject *slave_dir;
52112
52113 struct timer_rand_state *random;
52114 - atomic_t sync_io; /* RAID */
52115 + atomic_unchecked_t sync_io; /* RAID */
52116 struct disk_events *ev;
52117 #ifdef CONFIG_BLK_DEV_INTEGRITY
52118 struct blk_integrity *integrity;
52119 diff -urNp linux-2.6.39.4/include/linux/gracl.h linux-2.6.39.4/include/linux/gracl.h
52120 --- linux-2.6.39.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52121 +++ linux-2.6.39.4/include/linux/gracl.h 2011-08-05 19:44:37.000000000 -0400
52122 @@ -0,0 +1,317 @@
52123 +#ifndef GR_ACL_H
52124 +#define GR_ACL_H
52125 +
52126 +#include <linux/grdefs.h>
52127 +#include <linux/resource.h>
52128 +#include <linux/capability.h>
52129 +#include <linux/dcache.h>
52130 +#include <asm/resource.h>
52131 +
52132 +/* Major status information */
52133 +
52134 +#define GR_VERSION "grsecurity 2.2.2"
52135 +#define GRSECURITY_VERSION 0x2202
52136 +
52137 +enum {
52138 + GR_SHUTDOWN = 0,
52139 + GR_ENABLE = 1,
52140 + GR_SPROLE = 2,
52141 + GR_RELOAD = 3,
52142 + GR_SEGVMOD = 4,
52143 + GR_STATUS = 5,
52144 + GR_UNSPROLE = 6,
52145 + GR_PASSSET = 7,
52146 + GR_SPROLEPAM = 8,
52147 +};
52148 +
52149 +/* Password setup definitions
52150 + * kernel/grhash.c */
52151 +enum {
52152 + GR_PW_LEN = 128,
52153 + GR_SALT_LEN = 16,
52154 + GR_SHA_LEN = 32,
52155 +};
52156 +
52157 +enum {
52158 + GR_SPROLE_LEN = 64,
52159 +};
52160 +
52161 +enum {
52162 + GR_NO_GLOB = 0,
52163 + GR_REG_GLOB,
52164 + GR_CREATE_GLOB
52165 +};
52166 +
52167 +#define GR_NLIMITS 32
52168 +
52169 +/* Begin Data Structures */
52170 +
52171 +struct sprole_pw {
52172 + unsigned char *rolename;
52173 + unsigned char salt[GR_SALT_LEN];
52174 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52175 +};
52176 +
52177 +struct name_entry {
52178 + __u32 key;
52179 + ino_t inode;
52180 + dev_t device;
52181 + char *name;
52182 + __u16 len;
52183 + __u8 deleted;
52184 + struct name_entry *prev;
52185 + struct name_entry *next;
52186 +};
52187 +
52188 +struct inodev_entry {
52189 + struct name_entry *nentry;
52190 + struct inodev_entry *prev;
52191 + struct inodev_entry *next;
52192 +};
52193 +
52194 +struct acl_role_db {
52195 + struct acl_role_label **r_hash;
52196 + __u32 r_size;
52197 +};
52198 +
52199 +struct inodev_db {
52200 + struct inodev_entry **i_hash;
52201 + __u32 i_size;
52202 +};
52203 +
52204 +struct name_db {
52205 + struct name_entry **n_hash;
52206 + __u32 n_size;
52207 +};
52208 +
52209 +struct crash_uid {
52210 + uid_t uid;
52211 + unsigned long expires;
52212 +};
52213 +
52214 +struct gr_hash_struct {
52215 + void **table;
52216 + void **nametable;
52217 + void *first;
52218 + __u32 table_size;
52219 + __u32 used_size;
52220 + int type;
52221 +};
52222 +
52223 +/* Userspace Grsecurity ACL data structures */
52224 +
52225 +struct acl_subject_label {
52226 + char *filename;
52227 + ino_t inode;
52228 + dev_t device;
52229 + __u32 mode;
52230 + kernel_cap_t cap_mask;
52231 + kernel_cap_t cap_lower;
52232 + kernel_cap_t cap_invert_audit;
52233 +
52234 + struct rlimit res[GR_NLIMITS];
52235 + __u32 resmask;
52236 +
52237 + __u8 user_trans_type;
52238 + __u8 group_trans_type;
52239 + uid_t *user_transitions;
52240 + gid_t *group_transitions;
52241 + __u16 user_trans_num;
52242 + __u16 group_trans_num;
52243 +
52244 + __u32 sock_families[2];
52245 + __u32 ip_proto[8];
52246 + __u32 ip_type;
52247 + struct acl_ip_label **ips;
52248 + __u32 ip_num;
52249 + __u32 inaddr_any_override;
52250 +
52251 + __u32 crashes;
52252 + unsigned long expires;
52253 +
52254 + struct acl_subject_label *parent_subject;
52255 + struct gr_hash_struct *hash;
52256 + struct acl_subject_label *prev;
52257 + struct acl_subject_label *next;
52258 +
52259 + struct acl_object_label **obj_hash;
52260 + __u32 obj_hash_size;
52261 + __u16 pax_flags;
52262 +};
52263 +
52264 +struct role_allowed_ip {
52265 + __u32 addr;
52266 + __u32 netmask;
52267 +
52268 + struct role_allowed_ip *prev;
52269 + struct role_allowed_ip *next;
52270 +};
52271 +
52272 +struct role_transition {
52273 + char *rolename;
52274 +
52275 + struct role_transition *prev;
52276 + struct role_transition *next;
52277 +};
52278 +
52279 +struct acl_role_label {
52280 + char *rolename;
52281 + uid_t uidgid;
52282 + __u16 roletype;
52283 +
52284 + __u16 auth_attempts;
52285 + unsigned long expires;
52286 +
52287 + struct acl_subject_label *root_label;
52288 + struct gr_hash_struct *hash;
52289 +
52290 + struct acl_role_label *prev;
52291 + struct acl_role_label *next;
52292 +
52293 + struct role_transition *transitions;
52294 + struct role_allowed_ip *allowed_ips;
52295 + uid_t *domain_children;
52296 + __u16 domain_child_num;
52297 +
52298 + struct acl_subject_label **subj_hash;
52299 + __u32 subj_hash_size;
52300 +};
52301 +
52302 +struct user_acl_role_db {
52303 + struct acl_role_label **r_table;
52304 + __u32 num_pointers; /* Number of allocations to track */
52305 + __u32 num_roles; /* Number of roles */
52306 + __u32 num_domain_children; /* Number of domain children */
52307 + __u32 num_subjects; /* Number of subjects */
52308 + __u32 num_objects; /* Number of objects */
52309 +};
52310 +
52311 +struct acl_object_label {
52312 + char *filename;
52313 + ino_t inode;
52314 + dev_t device;
52315 + __u32 mode;
52316 +
52317 + struct acl_subject_label *nested;
52318 + struct acl_object_label *globbed;
52319 +
52320 + /* next two structures not used */
52321 +
52322 + struct acl_object_label *prev;
52323 + struct acl_object_label *next;
52324 +};
52325 +
52326 +struct acl_ip_label {
52327 + char *iface;
52328 + __u32 addr;
52329 + __u32 netmask;
52330 + __u16 low, high;
52331 + __u8 mode;
52332 + __u32 type;
52333 + __u32 proto[8];
52334 +
52335 + /* next two structures not used */
52336 +
52337 + struct acl_ip_label *prev;
52338 + struct acl_ip_label *next;
52339 +};
52340 +
52341 +struct gr_arg {
52342 + struct user_acl_role_db role_db;
52343 + unsigned char pw[GR_PW_LEN];
52344 + unsigned char salt[GR_SALT_LEN];
52345 + unsigned char sum[GR_SHA_LEN];
52346 + unsigned char sp_role[GR_SPROLE_LEN];
52347 + struct sprole_pw *sprole_pws;
52348 + dev_t segv_device;
52349 + ino_t segv_inode;
52350 + uid_t segv_uid;
52351 + __u16 num_sprole_pws;
52352 + __u16 mode;
52353 +};
52354 +
52355 +struct gr_arg_wrapper {
52356 + struct gr_arg *arg;
52357 + __u32 version;
52358 + __u32 size;
52359 +};
52360 +
52361 +struct subject_map {
52362 + struct acl_subject_label *user;
52363 + struct acl_subject_label *kernel;
52364 + struct subject_map *prev;
52365 + struct subject_map *next;
52366 +};
52367 +
52368 +struct acl_subj_map_db {
52369 + struct subject_map **s_hash;
52370 + __u32 s_size;
52371 +};
52372 +
52373 +/* End Data Structures Section */
52374 +
52375 +/* Hash functions generated by empirical testing by Brad Spengler
52376 + Makes good use of the low bits of the inode. Generally 0-1 times
52377 + in loop for successful match. 0-3 for unsuccessful match.
52378 + Shift/add algorithm with modulus of table size and an XOR*/
52379 +
52380 +static __inline__ unsigned int
52381 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52382 +{
52383 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
52384 +}
52385 +
52386 + static __inline__ unsigned int
52387 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52388 +{
52389 + return ((const unsigned long)userp % sz);
52390 +}
52391 +
52392 +static __inline__ unsigned int
52393 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52394 +{
52395 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52396 +}
52397 +
52398 +static __inline__ unsigned int
52399 +nhash(const char *name, const __u16 len, const unsigned int sz)
52400 +{
52401 + return full_name_hash((const unsigned char *)name, len) % sz;
52402 +}
52403 +
52404 +#define FOR_EACH_ROLE_START(role) \
52405 + role = role_list; \
52406 + while (role) {
52407 +
52408 +#define FOR_EACH_ROLE_END(role) \
52409 + role = role->prev; \
52410 + }
52411 +
52412 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52413 + subj = NULL; \
52414 + iter = 0; \
52415 + while (iter < role->subj_hash_size) { \
52416 + if (subj == NULL) \
52417 + subj = role->subj_hash[iter]; \
52418 + if (subj == NULL) { \
52419 + iter++; \
52420 + continue; \
52421 + }
52422 +
52423 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52424 + subj = subj->next; \
52425 + if (subj == NULL) \
52426 + iter++; \
52427 + }
52428 +
52429 +
52430 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52431 + subj = role->hash->first; \
52432 + while (subj != NULL) {
52433 +
52434 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52435 + subj = subj->next; \
52436 + }
52437 +
52438 +#endif
52439 +
52440 diff -urNp linux-2.6.39.4/include/linux/gralloc.h linux-2.6.39.4/include/linux/gralloc.h
52441 --- linux-2.6.39.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52442 +++ linux-2.6.39.4/include/linux/gralloc.h 2011-08-05 19:44:37.000000000 -0400
52443 @@ -0,0 +1,9 @@
52444 +#ifndef __GRALLOC_H
52445 +#define __GRALLOC_H
52446 +
52447 +void acl_free_all(void);
52448 +int acl_alloc_stack_init(unsigned long size);
52449 +void *acl_alloc(unsigned long len);
52450 +void *acl_alloc_num(unsigned long num, unsigned long len);
52451 +
52452 +#endif
52453 diff -urNp linux-2.6.39.4/include/linux/grdefs.h linux-2.6.39.4/include/linux/grdefs.h
52454 --- linux-2.6.39.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52455 +++ linux-2.6.39.4/include/linux/grdefs.h 2011-08-05 19:44:37.000000000 -0400
52456 @@ -0,0 +1,140 @@
52457 +#ifndef GRDEFS_H
52458 +#define GRDEFS_H
52459 +
52460 +/* Begin grsecurity status declarations */
52461 +
52462 +enum {
52463 + GR_READY = 0x01,
52464 + GR_STATUS_INIT = 0x00 // disabled state
52465 +};
52466 +
52467 +/* Begin ACL declarations */
52468 +
52469 +/* Role flags */
52470 +
52471 +enum {
52472 + GR_ROLE_USER = 0x0001,
52473 + GR_ROLE_GROUP = 0x0002,
52474 + GR_ROLE_DEFAULT = 0x0004,
52475 + GR_ROLE_SPECIAL = 0x0008,
52476 + GR_ROLE_AUTH = 0x0010,
52477 + GR_ROLE_NOPW = 0x0020,
52478 + GR_ROLE_GOD = 0x0040,
52479 + GR_ROLE_LEARN = 0x0080,
52480 + GR_ROLE_TPE = 0x0100,
52481 + GR_ROLE_DOMAIN = 0x0200,
52482 + GR_ROLE_PAM = 0x0400,
52483 + GR_ROLE_PERSIST = 0x0800
52484 +};
52485 +
52486 +/* ACL Subject and Object mode flags */
52487 +enum {
52488 + GR_DELETED = 0x80000000
52489 +};
52490 +
52491 +/* ACL Object-only mode flags */
52492 +enum {
52493 + GR_READ = 0x00000001,
52494 + GR_APPEND = 0x00000002,
52495 + GR_WRITE = 0x00000004,
52496 + GR_EXEC = 0x00000008,
52497 + GR_FIND = 0x00000010,
52498 + GR_INHERIT = 0x00000020,
52499 + GR_SETID = 0x00000040,
52500 + GR_CREATE = 0x00000080,
52501 + GR_DELETE = 0x00000100,
52502 + GR_LINK = 0x00000200,
52503 + GR_AUDIT_READ = 0x00000400,
52504 + GR_AUDIT_APPEND = 0x00000800,
52505 + GR_AUDIT_WRITE = 0x00001000,
52506 + GR_AUDIT_EXEC = 0x00002000,
52507 + GR_AUDIT_FIND = 0x00004000,
52508 + GR_AUDIT_INHERIT= 0x00008000,
52509 + GR_AUDIT_SETID = 0x00010000,
52510 + GR_AUDIT_CREATE = 0x00020000,
52511 + GR_AUDIT_DELETE = 0x00040000,
52512 + GR_AUDIT_LINK = 0x00080000,
52513 + GR_PTRACERD = 0x00100000,
52514 + GR_NOPTRACE = 0x00200000,
52515 + GR_SUPPRESS = 0x00400000,
52516 + GR_NOLEARN = 0x00800000,
52517 + GR_INIT_TRANSFER= 0x01000000
52518 +};
52519 +
52520 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52521 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52522 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52523 +
52524 +/* ACL subject-only mode flags */
52525 +enum {
52526 + GR_KILL = 0x00000001,
52527 + GR_VIEW = 0x00000002,
52528 + GR_PROTECTED = 0x00000004,
52529 + GR_LEARN = 0x00000008,
52530 + GR_OVERRIDE = 0x00000010,
52531 + /* just a placeholder, this mode is only used in userspace */
52532 + GR_DUMMY = 0x00000020,
52533 + GR_PROTSHM = 0x00000040,
52534 + GR_KILLPROC = 0x00000080,
52535 + GR_KILLIPPROC = 0x00000100,
52536 + /* just a placeholder, this mode is only used in userspace */
52537 + GR_NOTROJAN = 0x00000200,
52538 + GR_PROTPROCFD = 0x00000400,
52539 + GR_PROCACCT = 0x00000800,
52540 + GR_RELAXPTRACE = 0x00001000,
52541 + GR_NESTED = 0x00002000,
52542 + GR_INHERITLEARN = 0x00004000,
52543 + GR_PROCFIND = 0x00008000,
52544 + GR_POVERRIDE = 0x00010000,
52545 + GR_KERNELAUTH = 0x00020000,
52546 + GR_ATSECURE = 0x00040000,
52547 + GR_SHMEXEC = 0x00080000
52548 +};
52549 +
52550 +enum {
52551 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52552 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52553 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52554 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52555 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52556 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52557 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52558 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52559 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52560 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52561 +};
52562 +
52563 +enum {
52564 + GR_ID_USER = 0x01,
52565 + GR_ID_GROUP = 0x02,
52566 +};
52567 +
52568 +enum {
52569 + GR_ID_ALLOW = 0x01,
52570 + GR_ID_DENY = 0x02,
52571 +};
52572 +
52573 +#define GR_CRASH_RES 31
52574 +#define GR_UIDTABLE_MAX 500
52575 +
52576 +/* begin resource learning section */
52577 +enum {
52578 + GR_RLIM_CPU_BUMP = 60,
52579 + GR_RLIM_FSIZE_BUMP = 50000,
52580 + GR_RLIM_DATA_BUMP = 10000,
52581 + GR_RLIM_STACK_BUMP = 1000,
52582 + GR_RLIM_CORE_BUMP = 10000,
52583 + GR_RLIM_RSS_BUMP = 500000,
52584 + GR_RLIM_NPROC_BUMP = 1,
52585 + GR_RLIM_NOFILE_BUMP = 5,
52586 + GR_RLIM_MEMLOCK_BUMP = 50000,
52587 + GR_RLIM_AS_BUMP = 500000,
52588 + GR_RLIM_LOCKS_BUMP = 2,
52589 + GR_RLIM_SIGPENDING_BUMP = 5,
52590 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52591 + GR_RLIM_NICE_BUMP = 1,
52592 + GR_RLIM_RTPRIO_BUMP = 1,
52593 + GR_RLIM_RTTIME_BUMP = 1000000
52594 +};
52595 +
52596 +#endif
52597 diff -urNp linux-2.6.39.4/include/linux/grinternal.h linux-2.6.39.4/include/linux/grinternal.h
52598 --- linux-2.6.39.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52599 +++ linux-2.6.39.4/include/linux/grinternal.h 2011-08-05 19:44:37.000000000 -0400
52600 @@ -0,0 +1,219 @@
52601 +#ifndef __GRINTERNAL_H
52602 +#define __GRINTERNAL_H
52603 +
52604 +#ifdef CONFIG_GRKERNSEC
52605 +
52606 +#include <linux/fs.h>
52607 +#include <linux/mnt_namespace.h>
52608 +#include <linux/nsproxy.h>
52609 +#include <linux/gracl.h>
52610 +#include <linux/grdefs.h>
52611 +#include <linux/grmsg.h>
52612 +
52613 +void gr_add_learn_entry(const char *fmt, ...)
52614 + __attribute__ ((format (printf, 1, 2)));
52615 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52616 + const struct vfsmount *mnt);
52617 +__u32 gr_check_create(const struct dentry *new_dentry,
52618 + const struct dentry *parent,
52619 + const struct vfsmount *mnt, const __u32 mode);
52620 +int gr_check_protected_task(const struct task_struct *task);
52621 +__u32 to_gr_audit(const __u32 reqmode);
52622 +int gr_set_acls(const int type);
52623 +int gr_apply_subject_to_task(struct task_struct *task);
52624 +int gr_acl_is_enabled(void);
52625 +char gr_roletype_to_char(void);
52626 +
52627 +void gr_handle_alertkill(struct task_struct *task);
52628 +char *gr_to_filename(const struct dentry *dentry,
52629 + const struct vfsmount *mnt);
52630 +char *gr_to_filename1(const struct dentry *dentry,
52631 + const struct vfsmount *mnt);
52632 +char *gr_to_filename2(const struct dentry *dentry,
52633 + const struct vfsmount *mnt);
52634 +char *gr_to_filename3(const struct dentry *dentry,
52635 + const struct vfsmount *mnt);
52636 +
52637 +extern int grsec_enable_harden_ptrace;
52638 +extern int grsec_enable_link;
52639 +extern int grsec_enable_fifo;
52640 +extern int grsec_enable_execve;
52641 +extern int grsec_enable_shm;
52642 +extern int grsec_enable_execlog;
52643 +extern int grsec_enable_signal;
52644 +extern int grsec_enable_audit_ptrace;
52645 +extern int grsec_enable_forkfail;
52646 +extern int grsec_enable_time;
52647 +extern int grsec_enable_rofs;
52648 +extern int grsec_enable_chroot_shmat;
52649 +extern int grsec_enable_chroot_mount;
52650 +extern int grsec_enable_chroot_double;
52651 +extern int grsec_enable_chroot_pivot;
52652 +extern int grsec_enable_chroot_chdir;
52653 +extern int grsec_enable_chroot_chmod;
52654 +extern int grsec_enable_chroot_mknod;
52655 +extern int grsec_enable_chroot_fchdir;
52656 +extern int grsec_enable_chroot_nice;
52657 +extern int grsec_enable_chroot_execlog;
52658 +extern int grsec_enable_chroot_caps;
52659 +extern int grsec_enable_chroot_sysctl;
52660 +extern int grsec_enable_chroot_unix;
52661 +extern int grsec_enable_tpe;
52662 +extern int grsec_tpe_gid;
52663 +extern int grsec_enable_tpe_all;
52664 +extern int grsec_enable_tpe_invert;
52665 +extern int grsec_enable_socket_all;
52666 +extern int grsec_socket_all_gid;
52667 +extern int grsec_enable_socket_client;
52668 +extern int grsec_socket_client_gid;
52669 +extern int grsec_enable_socket_server;
52670 +extern int grsec_socket_server_gid;
52671 +extern int grsec_audit_gid;
52672 +extern int grsec_enable_group;
52673 +extern int grsec_enable_audit_textrel;
52674 +extern int grsec_enable_log_rwxmaps;
52675 +extern int grsec_enable_mount;
52676 +extern int grsec_enable_chdir;
52677 +extern int grsec_resource_logging;
52678 +extern int grsec_enable_blackhole;
52679 +extern int grsec_lastack_retries;
52680 +extern int grsec_enable_brute;
52681 +extern int grsec_lock;
52682 +
52683 +extern spinlock_t grsec_alert_lock;
52684 +extern unsigned long grsec_alert_wtime;
52685 +extern unsigned long grsec_alert_fyet;
52686 +
52687 +extern spinlock_t grsec_audit_lock;
52688 +
52689 +extern rwlock_t grsec_exec_file_lock;
52690 +
52691 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52692 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52693 + (tsk)->exec_file->f_vfsmnt) : "/")
52694 +
52695 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52696 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52697 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52698 +
52699 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52700 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52701 + (tsk)->exec_file->f_vfsmnt) : "/")
52702 +
52703 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52704 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52705 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52706 +
52707 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52708 +
52709 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52710 +
52711 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52712 + (task)->pid, (cred)->uid, \
52713 + (cred)->euid, (cred)->gid, (cred)->egid, \
52714 + gr_parent_task_fullpath(task), \
52715 + (task)->real_parent->comm, (task)->real_parent->pid, \
52716 + (pcred)->uid, (pcred)->euid, \
52717 + (pcred)->gid, (pcred)->egid
52718 +
52719 +#define GR_CHROOT_CAPS {{ \
52720 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52721 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52722 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52723 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52724 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52725 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52726 +
52727 +#define security_learn(normal_msg,args...) \
52728 +({ \
52729 + read_lock(&grsec_exec_file_lock); \
52730 + gr_add_learn_entry(normal_msg "\n", ## args); \
52731 + read_unlock(&grsec_exec_file_lock); \
52732 +})
52733 +
52734 +enum {
52735 + GR_DO_AUDIT,
52736 + GR_DONT_AUDIT,
52737 + /* used for non-audit messages that we shouldn't kill the task on */
52738 + GR_DONT_AUDIT_GOOD
52739 +};
52740 +
52741 +enum {
52742 + GR_TTYSNIFF,
52743 + GR_RBAC,
52744 + GR_RBAC_STR,
52745 + GR_STR_RBAC,
52746 + GR_RBAC_MODE2,
52747 + GR_RBAC_MODE3,
52748 + GR_FILENAME,
52749 + GR_SYSCTL_HIDDEN,
52750 + GR_NOARGS,
52751 + GR_ONE_INT,
52752 + GR_ONE_INT_TWO_STR,
52753 + GR_ONE_STR,
52754 + GR_STR_INT,
52755 + GR_TWO_STR_INT,
52756 + GR_TWO_INT,
52757 + GR_TWO_U64,
52758 + GR_THREE_INT,
52759 + GR_FIVE_INT_TWO_STR,
52760 + GR_TWO_STR,
52761 + GR_THREE_STR,
52762 + GR_FOUR_STR,
52763 + GR_STR_FILENAME,
52764 + GR_FILENAME_STR,
52765 + GR_FILENAME_TWO_INT,
52766 + GR_FILENAME_TWO_INT_STR,
52767 + GR_TEXTREL,
52768 + GR_PTRACE,
52769 + GR_RESOURCE,
52770 + GR_CAP,
52771 + GR_SIG,
52772 + GR_SIG2,
52773 + GR_CRASH1,
52774 + GR_CRASH2,
52775 + GR_PSACCT,
52776 + GR_RWXMAP
52777 +};
52778 +
52779 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52780 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52781 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52782 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52783 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52784 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52785 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52786 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52787 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52788 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52789 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52790 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52791 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52792 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52793 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52794 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52795 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52796 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52797 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52798 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52799 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52800 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52801 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52802 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52803 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52804 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52805 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52806 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52807 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52808 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52809 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52810 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52811 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52812 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52813 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52814 +
52815 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52816 +
52817 +#endif
52818 +
52819 +#endif
52820 diff -urNp linux-2.6.39.4/include/linux/grmsg.h linux-2.6.39.4/include/linux/grmsg.h
52821 --- linux-2.6.39.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52822 +++ linux-2.6.39.4/include/linux/grmsg.h 2011-08-05 19:44:37.000000000 -0400
52823 @@ -0,0 +1,108 @@
52824 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52825 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52826 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52827 +#define GR_STOPMOD_MSG "denied modification of module state by "
52828 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52829 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52830 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52831 +#define GR_IOPL_MSG "denied use of iopl() by "
52832 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52833 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52834 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52835 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52836 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52837 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52838 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52839 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52840 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52841 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52842 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52843 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52844 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52845 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52846 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52847 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52848 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52849 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52850 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52851 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52852 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52853 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52854 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52855 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52856 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52857 +#define GR_NPROC_MSG "denied overstep of process limit by "
52858 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52859 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52860 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52861 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52862 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52863 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52864 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52865 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52866 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52867 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52868 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52869 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52870 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52871 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52872 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52873 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52874 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52875 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52876 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52877 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52878 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52879 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52880 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52881 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52882 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52883 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52884 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52885 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52886 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52887 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52888 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52889 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52890 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52891 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52892 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52893 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52894 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52895 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52896 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52897 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52898 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52899 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52900 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52901 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52902 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52903 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52904 +#define GR_TIME_MSG "time set by "
52905 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52906 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52907 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52908 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52909 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52910 +#define GR_BIND_MSG "denied bind() by "
52911 +#define GR_CONNECT_MSG "denied connect() by "
52912 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52913 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
52914 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
52915 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
52916 +#define GR_CAP_ACL_MSG "use of %s denied for "
52917 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
52918 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
52919 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
52920 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
52921 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
52922 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
52923 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
52924 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
52925 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
52926 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
52927 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
52928 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
52929 +#define GR_VM86_MSG "denied use of vm86 by "
52930 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
52931 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
52932 diff -urNp linux-2.6.39.4/include/linux/grsecurity.h linux-2.6.39.4/include/linux/grsecurity.h
52933 --- linux-2.6.39.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
52934 +++ linux-2.6.39.4/include/linux/grsecurity.h 2011-08-05 19:54:17.000000000 -0400
52935 @@ -0,0 +1,218 @@
52936 +#ifndef GR_SECURITY_H
52937 +#define GR_SECURITY_H
52938 +#include <linux/fs.h>
52939 +#include <linux/fs_struct.h>
52940 +#include <linux/binfmts.h>
52941 +#include <linux/gracl.h>
52942 +#include <linux/compat.h>
52943 +
52944 +/* notify of brain-dead configs */
52945 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52946 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
52947 +#endif
52948 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
52949 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
52950 +#endif
52951 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52952 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52953 +#endif
52954 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
52955 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
52956 +#endif
52957 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
52958 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
52959 +#endif
52960 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
52961 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
52962 +#endif
52963 +
52964 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
52965 +void gr_handle_brute_check(void);
52966 +void gr_handle_kernel_exploit(void);
52967 +int gr_process_user_ban(void);
52968 +
52969 +char gr_roletype_to_char(void);
52970 +
52971 +int gr_acl_enable_at_secure(void);
52972 +
52973 +int gr_check_user_change(int real, int effective, int fs);
52974 +int gr_check_group_change(int real, int effective, int fs);
52975 +
52976 +void gr_del_task_from_ip_table(struct task_struct *p);
52977 +
52978 +int gr_pid_is_chrooted(struct task_struct *p);
52979 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
52980 +int gr_handle_chroot_nice(void);
52981 +int gr_handle_chroot_sysctl(const int op);
52982 +int gr_handle_chroot_setpriority(struct task_struct *p,
52983 + const int niceval);
52984 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
52985 +int gr_handle_chroot_chroot(const struct dentry *dentry,
52986 + const struct vfsmount *mnt);
52987 +int gr_handle_chroot_caps(struct path *path);
52988 +void gr_handle_chroot_chdir(struct path *path);
52989 +int gr_handle_chroot_chmod(const struct dentry *dentry,
52990 + const struct vfsmount *mnt, const int mode);
52991 +int gr_handle_chroot_mknod(const struct dentry *dentry,
52992 + const struct vfsmount *mnt, const int mode);
52993 +int gr_handle_chroot_mount(const struct dentry *dentry,
52994 + const struct vfsmount *mnt,
52995 + const char *dev_name);
52996 +int gr_handle_chroot_pivot(void);
52997 +int gr_handle_chroot_unix(const pid_t pid);
52998 +
52999 +int gr_handle_rawio(const struct inode *inode);
53000 +int gr_handle_nproc(void);
53001 +
53002 +void gr_handle_ioperm(void);
53003 +void gr_handle_iopl(void);
53004 +
53005 +int gr_tpe_allow(const struct file *file);
53006 +
53007 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
53008 +void gr_clear_chroot_entries(struct task_struct *task);
53009 +
53010 +void gr_log_forkfail(const int retval);
53011 +void gr_log_timechange(void);
53012 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
53013 +void gr_log_chdir(const struct dentry *dentry,
53014 + const struct vfsmount *mnt);
53015 +void gr_log_chroot_exec(const struct dentry *dentry,
53016 + const struct vfsmount *mnt);
53017 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
53018 +#ifdef CONFIG_COMPAT
53019 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
53020 +#endif
53021 +void gr_log_remount(const char *devname, const int retval);
53022 +void gr_log_unmount(const char *devname, const int retval);
53023 +void gr_log_mount(const char *from, const char *to, const int retval);
53024 +void gr_log_textrel(struct vm_area_struct *vma);
53025 +void gr_log_rwxmmap(struct file *file);
53026 +void gr_log_rwxmprotect(struct file *file);
53027 +
53028 +int gr_handle_follow_link(const struct inode *parent,
53029 + const struct inode *inode,
53030 + const struct dentry *dentry,
53031 + const struct vfsmount *mnt);
53032 +int gr_handle_fifo(const struct dentry *dentry,
53033 + const struct vfsmount *mnt,
53034 + const struct dentry *dir, const int flag,
53035 + const int acc_mode);
53036 +int gr_handle_hardlink(const struct dentry *dentry,
53037 + const struct vfsmount *mnt,
53038 + struct inode *inode,
53039 + const int mode, const char *to);
53040 +
53041 +int gr_is_capable(const int cap);
53042 +int gr_is_capable_nolog(const int cap);
53043 +void gr_learn_resource(const struct task_struct *task, const int limit,
53044 + const unsigned long wanted, const int gt);
53045 +void gr_copy_label(struct task_struct *tsk);
53046 +void gr_handle_crash(struct task_struct *task, const int sig);
53047 +int gr_handle_signal(const struct task_struct *p, const int sig);
53048 +int gr_check_crash_uid(const uid_t uid);
53049 +int gr_check_protected_task(const struct task_struct *task);
53050 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
53051 +int gr_acl_handle_mmap(const struct file *file,
53052 + const unsigned long prot);
53053 +int gr_acl_handle_mprotect(const struct file *file,
53054 + const unsigned long prot);
53055 +int gr_check_hidden_task(const struct task_struct *tsk);
53056 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
53057 + const struct vfsmount *mnt);
53058 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
53059 + const struct vfsmount *mnt);
53060 +__u32 gr_acl_handle_access(const struct dentry *dentry,
53061 + const struct vfsmount *mnt, const int fmode);
53062 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
53063 + const struct vfsmount *mnt, mode_t mode);
53064 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
53065 + const struct vfsmount *mnt, mode_t mode);
53066 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
53067 + const struct vfsmount *mnt);
53068 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
53069 + const struct vfsmount *mnt);
53070 +int gr_handle_ptrace(struct task_struct *task, const long request);
53071 +int gr_handle_proc_ptrace(struct task_struct *task);
53072 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
53073 + const struct vfsmount *mnt);
53074 +int gr_check_crash_exec(const struct file *filp);
53075 +int gr_acl_is_enabled(void);
53076 +void gr_set_kernel_label(struct task_struct *task);
53077 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
53078 + const gid_t gid);
53079 +int gr_set_proc_label(const struct dentry *dentry,
53080 + const struct vfsmount *mnt,
53081 + const int unsafe_share);
53082 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
53083 + const struct vfsmount *mnt);
53084 +__u32 gr_acl_handle_open(const struct dentry *dentry,
53085 + const struct vfsmount *mnt, const int fmode);
53086 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
53087 + const struct dentry *p_dentry,
53088 + const struct vfsmount *p_mnt, const int fmode,
53089 + const int imode);
53090 +void gr_handle_create(const struct dentry *dentry,
53091 + const struct vfsmount *mnt);
53092 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
53093 + const struct dentry *parent_dentry,
53094 + const struct vfsmount *parent_mnt,
53095 + const int mode);
53096 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53097 + const struct dentry *parent_dentry,
53098 + const struct vfsmount *parent_mnt);
53099 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53100 + const struct vfsmount *mnt);
53101 +void gr_handle_delete(const ino_t ino, const dev_t dev);
53102 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53103 + const struct vfsmount *mnt);
53104 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53105 + const struct dentry *parent_dentry,
53106 + const struct vfsmount *parent_mnt,
53107 + const char *from);
53108 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53109 + const struct dentry *parent_dentry,
53110 + const struct vfsmount *parent_mnt,
53111 + const struct dentry *old_dentry,
53112 + const struct vfsmount *old_mnt, const char *to);
53113 +int gr_acl_handle_rename(struct dentry *new_dentry,
53114 + struct dentry *parent_dentry,
53115 + const struct vfsmount *parent_mnt,
53116 + struct dentry *old_dentry,
53117 + struct inode *old_parent_inode,
53118 + struct vfsmount *old_mnt, const char *newname);
53119 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53120 + struct dentry *old_dentry,
53121 + struct dentry *new_dentry,
53122 + struct vfsmount *mnt, const __u8 replace);
53123 +__u32 gr_check_link(const struct dentry *new_dentry,
53124 + const struct dentry *parent_dentry,
53125 + const struct vfsmount *parent_mnt,
53126 + const struct dentry *old_dentry,
53127 + const struct vfsmount *old_mnt);
53128 +int gr_acl_handle_filldir(const struct file *file, const char *name,
53129 + const unsigned int namelen, const ino_t ino);
53130 +
53131 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
53132 + const struct vfsmount *mnt);
53133 +void gr_acl_handle_exit(void);
53134 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
53135 +int gr_acl_handle_procpidmem(const struct task_struct *task);
53136 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53137 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53138 +void gr_audit_ptrace(struct task_struct *task);
53139 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53140 +
53141 +#ifdef CONFIG_GRKERNSEC
53142 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53143 +void gr_handle_vm86(void);
53144 +void gr_handle_mem_readwrite(u64 from, u64 to);
53145 +
53146 +extern int grsec_enable_dmesg;
53147 +extern int grsec_disable_privio;
53148 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53149 +extern int grsec_enable_chroot_findtask;
53150 +#endif
53151 +#endif
53152 +
53153 +#endif
53154 diff -urNp linux-2.6.39.4/include/linux/grsock.h linux-2.6.39.4/include/linux/grsock.h
53155 --- linux-2.6.39.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53156 +++ linux-2.6.39.4/include/linux/grsock.h 2011-08-05 19:44:37.000000000 -0400
53157 @@ -0,0 +1,19 @@
53158 +#ifndef __GRSOCK_H
53159 +#define __GRSOCK_H
53160 +
53161 +extern void gr_attach_curr_ip(const struct sock *sk);
53162 +extern int gr_handle_sock_all(const int family, const int type,
53163 + const int protocol);
53164 +extern int gr_handle_sock_server(const struct sockaddr *sck);
53165 +extern int gr_handle_sock_server_other(const struct sock *sck);
53166 +extern int gr_handle_sock_client(const struct sockaddr *sck);
53167 +extern int gr_search_connect(struct socket * sock,
53168 + struct sockaddr_in * addr);
53169 +extern int gr_search_bind(struct socket * sock,
53170 + struct sockaddr_in * addr);
53171 +extern int gr_search_listen(struct socket * sock);
53172 +extern int gr_search_accept(struct socket * sock);
53173 +extern int gr_search_socket(const int domain, const int type,
53174 + const int protocol);
53175 +
53176 +#endif
53177 diff -urNp linux-2.6.39.4/include/linux/highmem.h linux-2.6.39.4/include/linux/highmem.h
53178 --- linux-2.6.39.4/include/linux/highmem.h 2011-05-19 00:06:34.000000000 -0400
53179 +++ linux-2.6.39.4/include/linux/highmem.h 2011-08-05 19:44:37.000000000 -0400
53180 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53181 kunmap_atomic(kaddr, KM_USER0);
53182 }
53183
53184 +static inline void sanitize_highpage(struct page *page)
53185 +{
53186 + void *kaddr;
53187 + unsigned long flags;
53188 +
53189 + local_irq_save(flags);
53190 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
53191 + clear_page(kaddr);
53192 + kunmap_atomic(kaddr, KM_CLEARPAGE);
53193 + local_irq_restore(flags);
53194 +}
53195 +
53196 static inline void zero_user_segments(struct page *page,
53197 unsigned start1, unsigned end1,
53198 unsigned start2, unsigned end2)
53199 diff -urNp linux-2.6.39.4/include/linux/i2c.h linux-2.6.39.4/include/linux/i2c.h
53200 --- linux-2.6.39.4/include/linux/i2c.h 2011-05-19 00:06:34.000000000 -0400
53201 +++ linux-2.6.39.4/include/linux/i2c.h 2011-08-05 20:34:06.000000000 -0400
53202 @@ -346,6 +346,7 @@ struct i2c_algorithm {
53203 /* To determine what the adapter supports */
53204 u32 (*functionality) (struct i2c_adapter *);
53205 };
53206 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53207
53208 /*
53209 * i2c_adapter is the structure used to identify a physical i2c bus along
53210 diff -urNp linux-2.6.39.4/include/linux/i2o.h linux-2.6.39.4/include/linux/i2o.h
53211 --- linux-2.6.39.4/include/linux/i2o.h 2011-05-19 00:06:34.000000000 -0400
53212 +++ linux-2.6.39.4/include/linux/i2o.h 2011-08-05 19:44:37.000000000 -0400
53213 @@ -564,7 +564,7 @@ struct i2o_controller {
53214 struct i2o_device *exec; /* Executive */
53215 #if BITS_PER_LONG == 64
53216 spinlock_t context_list_lock; /* lock for context_list */
53217 - atomic_t context_list_counter; /* needed for unique contexts */
53218 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53219 struct list_head context_list; /* list of context id's
53220 and pointers */
53221 #endif
53222 diff -urNp linux-2.6.39.4/include/linux/init.h linux-2.6.39.4/include/linux/init.h
53223 --- linux-2.6.39.4/include/linux/init.h 2011-05-19 00:06:34.000000000 -0400
53224 +++ linux-2.6.39.4/include/linux/init.h 2011-08-05 19:44:37.000000000 -0400
53225 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53226
53227 /* Each module must use one module_init(). */
53228 #define module_init(initfn) \
53229 - static inline initcall_t __inittest(void) \
53230 + static inline __used initcall_t __inittest(void) \
53231 { return initfn; } \
53232 int init_module(void) __attribute__((alias(#initfn)));
53233
53234 /* This is only required if you want to be unloadable. */
53235 #define module_exit(exitfn) \
53236 - static inline exitcall_t __exittest(void) \
53237 + static inline __used exitcall_t __exittest(void) \
53238 { return exitfn; } \
53239 void cleanup_module(void) __attribute__((alias(#exitfn)));
53240
53241 diff -urNp linux-2.6.39.4/include/linux/init_task.h linux-2.6.39.4/include/linux/init_task.h
53242 --- linux-2.6.39.4/include/linux/init_task.h 2011-05-19 00:06:34.000000000 -0400
53243 +++ linux-2.6.39.4/include/linux/init_task.h 2011-08-05 19:44:37.000000000 -0400
53244 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
53245 #define INIT_IDS
53246 #endif
53247
53248 +#ifdef CONFIG_X86
53249 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53250 +#else
53251 +#define INIT_TASK_THREAD_INFO
53252 +#endif
53253 +
53254 /*
53255 * Because of the reduced scope of CAP_SETPCAP when filesystem
53256 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
53257 @@ -163,6 +169,7 @@ extern struct cred init_cred;
53258 RCU_INIT_POINTER(.cred, &init_cred), \
53259 .comm = "swapper", \
53260 .thread = INIT_THREAD, \
53261 + INIT_TASK_THREAD_INFO \
53262 .fs = &init_fs, \
53263 .files = &init_files, \
53264 .signal = &init_signals, \
53265 diff -urNp linux-2.6.39.4/include/linux/intel-iommu.h linux-2.6.39.4/include/linux/intel-iommu.h
53266 --- linux-2.6.39.4/include/linux/intel-iommu.h 2011-05-19 00:06:34.000000000 -0400
53267 +++ linux-2.6.39.4/include/linux/intel-iommu.h 2011-08-05 20:34:06.000000000 -0400
53268 @@ -296,7 +296,7 @@ struct iommu_flush {
53269 u8 fm, u64 type);
53270 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53271 unsigned int size_order, u64 type);
53272 -};
53273 +} __no_const;
53274
53275 enum {
53276 SR_DMAR_FECTL_REG,
53277 diff -urNp linux-2.6.39.4/include/linux/interrupt.h linux-2.6.39.4/include/linux/interrupt.h
53278 --- linux-2.6.39.4/include/linux/interrupt.h 2011-05-19 00:06:34.000000000 -0400
53279 +++ linux-2.6.39.4/include/linux/interrupt.h 2011-08-05 19:44:37.000000000 -0400
53280 @@ -422,7 +422,7 @@ enum
53281 /* map softirq index to softirq name. update 'softirq_to_name' in
53282 * kernel/softirq.c when adding a new softirq.
53283 */
53284 -extern char *softirq_to_name[NR_SOFTIRQS];
53285 +extern const char * const softirq_to_name[NR_SOFTIRQS];
53286
53287 /* softirq mask and active fields moved to irq_cpustat_t in
53288 * asm/hardirq.h to get better cache usage. KAO
53289 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53290
53291 struct softirq_action
53292 {
53293 - void (*action)(struct softirq_action *);
53294 + void (*action)(void);
53295 };
53296
53297 asmlinkage void do_softirq(void);
53298 asmlinkage void __do_softirq(void);
53299 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53300 +extern void open_softirq(int nr, void (*action)(void));
53301 extern void softirq_init(void);
53302 static inline void __raise_softirq_irqoff(unsigned int nr)
53303 {
53304 diff -urNp linux-2.6.39.4/include/linux/kallsyms.h linux-2.6.39.4/include/linux/kallsyms.h
53305 --- linux-2.6.39.4/include/linux/kallsyms.h 2011-05-19 00:06:34.000000000 -0400
53306 +++ linux-2.6.39.4/include/linux/kallsyms.h 2011-08-05 19:44:37.000000000 -0400
53307 @@ -15,7 +15,8 @@
53308
53309 struct module;
53310
53311 -#ifdef CONFIG_KALLSYMS
53312 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53313 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53314 /* Lookup the address for a symbol. Returns 0 if not found. */
53315 unsigned long kallsyms_lookup_name(const char *name);
53316
53317 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53318 /* Stupid that this does nothing, but I didn't create this mess. */
53319 #define __print_symbol(fmt, addr)
53320 #endif /*CONFIG_KALLSYMS*/
53321 +#else /* when included by kallsyms.c, vsnprintf.c, or
53322 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53323 +extern void __print_symbol(const char *fmt, unsigned long address);
53324 +extern int sprint_backtrace(char *buffer, unsigned long address);
53325 +extern int sprint_symbol(char *buffer, unsigned long address);
53326 +const char *kallsyms_lookup(unsigned long addr,
53327 + unsigned long *symbolsize,
53328 + unsigned long *offset,
53329 + char **modname, char *namebuf);
53330 +#endif
53331
53332 /* This macro allows us to keep printk typechecking */
53333 static void __check_printsym_format(const char *fmt, ...)
53334 diff -urNp linux-2.6.39.4/include/linux/kgdb.h linux-2.6.39.4/include/linux/kgdb.h
53335 --- linux-2.6.39.4/include/linux/kgdb.h 2011-05-19 00:06:34.000000000 -0400
53336 +++ linux-2.6.39.4/include/linux/kgdb.h 2011-08-05 20:34:06.000000000 -0400
53337 @@ -53,7 +53,7 @@ extern int kgdb_connected;
53338 extern int kgdb_io_module_registered;
53339
53340 extern atomic_t kgdb_setting_breakpoint;
53341 -extern atomic_t kgdb_cpu_doing_single_step;
53342 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53343
53344 extern struct task_struct *kgdb_usethread;
53345 extern struct task_struct *kgdb_contthread;
53346 @@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53347 * hardware debug registers.
53348 */
53349 struct kgdb_arch {
53350 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53351 - unsigned long flags;
53352 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53353 + const unsigned long flags;
53354
53355 int (*set_breakpoint)(unsigned long, char *);
53356 int (*remove_breakpoint)(unsigned long, char *);
53357 @@ -268,14 +268,14 @@ struct kgdb_arch {
53358 * not a console
53359 */
53360 struct kgdb_io {
53361 - const char *name;
53362 + const char * const name;
53363 int (*read_char) (void);
53364 void (*write_char) (u8);
53365 void (*flush) (void);
53366 int (*init) (void);
53367 void (*pre_exception) (void);
53368 void (*post_exception) (void);
53369 - int is_console;
53370 + const int is_console;
53371 };
53372
53373 extern struct kgdb_arch arch_kgdb_ops;
53374 diff -urNp linux-2.6.39.4/include/linux/kmod.h linux-2.6.39.4/include/linux/kmod.h
53375 --- linux-2.6.39.4/include/linux/kmod.h 2011-05-19 00:06:34.000000000 -0400
53376 +++ linux-2.6.39.4/include/linux/kmod.h 2011-08-05 19:44:37.000000000 -0400
53377 @@ -33,6 +33,8 @@ extern char modprobe_path[]; /* for sysc
53378 * usually useless though. */
53379 extern int __request_module(bool wait, const char *name, ...) \
53380 __attribute__((format(printf, 2, 3)));
53381 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53382 + __attribute__((format(printf, 3, 4)));
53383 #define request_module(mod...) __request_module(true, mod)
53384 #define request_module_nowait(mod...) __request_module(false, mod)
53385 #define try_then_request_module(x, mod...) \
53386 diff -urNp linux-2.6.39.4/include/linux/kvm_host.h linux-2.6.39.4/include/linux/kvm_host.h
53387 --- linux-2.6.39.4/include/linux/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
53388 +++ linux-2.6.39.4/include/linux/kvm_host.h 2011-08-05 19:44:37.000000000 -0400
53389 @@ -302,7 +302,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53390 void vcpu_load(struct kvm_vcpu *vcpu);
53391 void vcpu_put(struct kvm_vcpu *vcpu);
53392
53393 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53394 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53395 struct module *module);
53396 void kvm_exit(void);
53397
53398 @@ -442,7 +442,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53399 struct kvm_guest_debug *dbg);
53400 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53401
53402 -int kvm_arch_init(void *opaque);
53403 +int kvm_arch_init(const void *opaque);
53404 void kvm_arch_exit(void);
53405
53406 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53407 diff -urNp linux-2.6.39.4/include/linux/libata.h linux-2.6.39.4/include/linux/libata.h
53408 --- linux-2.6.39.4/include/linux/libata.h 2011-05-19 00:06:34.000000000 -0400
53409 +++ linux-2.6.39.4/include/linux/libata.h 2011-08-05 20:34:06.000000000 -0400
53410 @@ -898,7 +898,7 @@ struct ata_port_operations {
53411 * ->inherits must be the last field and all the preceding
53412 * fields must be pointers.
53413 */
53414 - const struct ata_port_operations *inherits;
53415 + const struct ata_port_operations * const inherits;
53416 };
53417
53418 struct ata_port_info {
53419 diff -urNp linux-2.6.39.4/include/linux/mca.h linux-2.6.39.4/include/linux/mca.h
53420 --- linux-2.6.39.4/include/linux/mca.h 2011-05-19 00:06:34.000000000 -0400
53421 +++ linux-2.6.39.4/include/linux/mca.h 2011-08-05 20:34:06.000000000 -0400
53422 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53423 int region);
53424 void * (*mca_transform_memory)(struct mca_device *,
53425 void *memory);
53426 -};
53427 +} __no_const;
53428
53429 struct mca_bus {
53430 u64 default_dma_mask;
53431 diff -urNp linux-2.6.39.4/include/linux/memory.h linux-2.6.39.4/include/linux/memory.h
53432 --- linux-2.6.39.4/include/linux/memory.h 2011-05-19 00:06:34.000000000 -0400
53433 +++ linux-2.6.39.4/include/linux/memory.h 2011-08-05 20:34:06.000000000 -0400
53434 @@ -142,7 +142,7 @@ struct memory_accessor {
53435 size_t count);
53436 ssize_t (*write)(struct memory_accessor *, const char *buf,
53437 off_t offset, size_t count);
53438 -};
53439 +} __no_const;
53440
53441 /*
53442 * Kernel text modification mutex, used for code patching. Users of this lock
53443 diff -urNp linux-2.6.39.4/include/linux/mfd/abx500.h linux-2.6.39.4/include/linux/mfd/abx500.h
53444 --- linux-2.6.39.4/include/linux/mfd/abx500.h 2011-05-19 00:06:34.000000000 -0400
53445 +++ linux-2.6.39.4/include/linux/mfd/abx500.h 2011-08-05 20:34:06.000000000 -0400
53446 @@ -226,6 +226,7 @@ struct abx500_ops {
53447 int (*event_registers_startup_state_get) (struct device *, u8 *);
53448 int (*startup_irq_enabled) (struct device *, unsigned int);
53449 };
53450 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53451
53452 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53453 void abx500_remove_ops(struct device *dev);
53454 diff -urNp linux-2.6.39.4/include/linux/mm.h linux-2.6.39.4/include/linux/mm.h
53455 --- linux-2.6.39.4/include/linux/mm.h 2011-05-19 00:06:34.000000000 -0400
53456 +++ linux-2.6.39.4/include/linux/mm.h 2011-08-05 19:44:37.000000000 -0400
53457 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53458
53459 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53460 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53461 +
53462 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53463 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53464 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53465 +#else
53466 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53467 +#endif
53468 +
53469 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53470 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53471
53472 @@ -1010,34 +1017,6 @@ int set_page_dirty(struct page *page);
53473 int set_page_dirty_lock(struct page *page);
53474 int clear_page_dirty_for_io(struct page *page);
53475
53476 -/* Is the vma a continuation of the stack vma above it? */
53477 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53478 -{
53479 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53480 -}
53481 -
53482 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53483 - unsigned long addr)
53484 -{
53485 - return (vma->vm_flags & VM_GROWSDOWN) &&
53486 - (vma->vm_start == addr) &&
53487 - !vma_growsdown(vma->vm_prev, addr);
53488 -}
53489 -
53490 -/* Is the vma a continuation of the stack vma below it? */
53491 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53492 -{
53493 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53494 -}
53495 -
53496 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53497 - unsigned long addr)
53498 -{
53499 - return (vma->vm_flags & VM_GROWSUP) &&
53500 - (vma->vm_end == addr) &&
53501 - !vma_growsup(vma->vm_next, addr);
53502 -}
53503 -
53504 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53505 unsigned long old_addr, struct vm_area_struct *new_vma,
53506 unsigned long new_addr, unsigned long len);
53507 @@ -1189,6 +1168,15 @@ struct shrinker {
53508 extern void register_shrinker(struct shrinker *);
53509 extern void unregister_shrinker(struct shrinker *);
53510
53511 +#ifdef CONFIG_MMU
53512 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
53513 +#else
53514 +static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53515 +{
53516 + return __pgprot(0);
53517 +}
53518 +#endif
53519 +
53520 int vma_wants_writenotify(struct vm_area_struct *vma);
53521
53522 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53523 @@ -1476,6 +1464,7 @@ out:
53524 }
53525
53526 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53527 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53528
53529 extern unsigned long do_brk(unsigned long, unsigned long);
53530
53531 @@ -1532,6 +1521,10 @@ extern struct vm_area_struct * find_vma(
53532 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53533 struct vm_area_struct **pprev);
53534
53535 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53536 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53537 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53538 +
53539 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53540 NULL if none. Assume start_addr < end_addr. */
53541 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53542 @@ -1548,15 +1541,6 @@ static inline unsigned long vma_pages(st
53543 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53544 }
53545
53546 -#ifdef CONFIG_MMU
53547 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53548 -#else
53549 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53550 -{
53551 - return __pgprot(0);
53552 -}
53553 -#endif
53554 -
53555 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53556 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53557 unsigned long pfn, unsigned long size, pgprot_t);
53558 @@ -1668,7 +1652,7 @@ extern int unpoison_memory(unsigned long
53559 extern int sysctl_memory_failure_early_kill;
53560 extern int sysctl_memory_failure_recovery;
53561 extern void shake_page(struct page *p, int access);
53562 -extern atomic_long_t mce_bad_pages;
53563 +extern atomic_long_unchecked_t mce_bad_pages;
53564 extern int soft_offline_page(struct page *page, int flags);
53565
53566 extern void dump_page(struct page *page);
53567 @@ -1682,5 +1666,11 @@ extern void copy_user_huge_page(struct p
53568 unsigned int pages_per_huge_page);
53569 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53570
53571 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53572 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53573 +#else
53574 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53575 +#endif
53576 +
53577 #endif /* __KERNEL__ */
53578 #endif /* _LINUX_MM_H */
53579 diff -urNp linux-2.6.39.4/include/linux/mm_types.h linux-2.6.39.4/include/linux/mm_types.h
53580 --- linux-2.6.39.4/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
53581 +++ linux-2.6.39.4/include/linux/mm_types.h 2011-08-05 19:44:37.000000000 -0400
53582 @@ -183,6 +183,8 @@ struct vm_area_struct {
53583 #ifdef CONFIG_NUMA
53584 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53585 #endif
53586 +
53587 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53588 };
53589
53590 struct core_thread {
53591 @@ -317,6 +319,24 @@ struct mm_struct {
53592 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
53593 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
53594 #endif
53595 +
53596 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53597 + unsigned long pax_flags;
53598 +#endif
53599 +
53600 +#ifdef CONFIG_PAX_DLRESOLVE
53601 + unsigned long call_dl_resolve;
53602 +#endif
53603 +
53604 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53605 + unsigned long call_syscall;
53606 +#endif
53607 +
53608 +#ifdef CONFIG_PAX_ASLR
53609 + unsigned long delta_mmap; /* randomized offset */
53610 + unsigned long delta_stack; /* randomized offset */
53611 +#endif
53612 +
53613 };
53614
53615 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
53616 diff -urNp linux-2.6.39.4/include/linux/mmu_notifier.h linux-2.6.39.4/include/linux/mmu_notifier.h
53617 --- linux-2.6.39.4/include/linux/mmu_notifier.h 2011-05-19 00:06:34.000000000 -0400
53618 +++ linux-2.6.39.4/include/linux/mmu_notifier.h 2011-08-05 19:44:37.000000000 -0400
53619 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53620 */
53621 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53622 ({ \
53623 - pte_t __pte; \
53624 + pte_t ___pte; \
53625 struct vm_area_struct *___vma = __vma; \
53626 unsigned long ___address = __address; \
53627 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53628 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53629 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53630 - __pte; \
53631 + ___pte; \
53632 })
53633
53634 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53635 diff -urNp linux-2.6.39.4/include/linux/mmzone.h linux-2.6.39.4/include/linux/mmzone.h
53636 --- linux-2.6.39.4/include/linux/mmzone.h 2011-05-19 00:06:34.000000000 -0400
53637 +++ linux-2.6.39.4/include/linux/mmzone.h 2011-08-05 19:44:37.000000000 -0400
53638 @@ -355,7 +355,7 @@ struct zone {
53639 unsigned long flags; /* zone flags, see below */
53640
53641 /* Zone statistics */
53642 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53643 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53644
53645 /*
53646 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53647 diff -urNp linux-2.6.39.4/include/linux/mod_devicetable.h linux-2.6.39.4/include/linux/mod_devicetable.h
53648 --- linux-2.6.39.4/include/linux/mod_devicetable.h 2011-05-19 00:06:34.000000000 -0400
53649 +++ linux-2.6.39.4/include/linux/mod_devicetable.h 2011-08-05 19:44:37.000000000 -0400
53650 @@ -12,7 +12,7 @@
53651 typedef unsigned long kernel_ulong_t;
53652 #endif
53653
53654 -#define PCI_ANY_ID (~0)
53655 +#define PCI_ANY_ID ((__u16)~0)
53656
53657 struct pci_device_id {
53658 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53659 @@ -131,7 +131,7 @@ struct usb_device_id {
53660 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53661 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53662
53663 -#define HID_ANY_ID (~0)
53664 +#define HID_ANY_ID (~0U)
53665
53666 struct hid_device_id {
53667 __u16 bus;
53668 diff -urNp linux-2.6.39.4/include/linux/module.h linux-2.6.39.4/include/linux/module.h
53669 --- linux-2.6.39.4/include/linux/module.h 2011-05-19 00:06:34.000000000 -0400
53670 +++ linux-2.6.39.4/include/linux/module.h 2011-08-05 20:34:06.000000000 -0400
53671 @@ -16,6 +16,7 @@
53672 #include <linux/kobject.h>
53673 #include <linux/moduleparam.h>
53674 #include <linux/tracepoint.h>
53675 +#include <linux/fs.h>
53676
53677 #include <linux/percpu.h>
53678 #include <asm/module.h>
53679 @@ -324,19 +325,16 @@ struct module
53680 int (*init)(void);
53681
53682 /* If this is non-NULL, vfree after init() returns */
53683 - void *module_init;
53684 + void *module_init_rx, *module_init_rw;
53685
53686 /* Here is the actual code + data, vfree'd on unload. */
53687 - void *module_core;
53688 + void *module_core_rx, *module_core_rw;
53689
53690 /* Here are the sizes of the init and core sections */
53691 - unsigned int init_size, core_size;
53692 + unsigned int init_size_rw, core_size_rw;
53693
53694 /* The size of the executable code in each section. */
53695 - unsigned int init_text_size, core_text_size;
53696 -
53697 - /* Size of RO sections of the module (text+rodata) */
53698 - unsigned int init_ro_size, core_ro_size;
53699 + unsigned int init_size_rx, core_size_rx;
53700
53701 /* Arch-specific module values */
53702 struct mod_arch_specific arch;
53703 @@ -391,6 +389,10 @@ struct module
53704 #ifdef CONFIG_EVENT_TRACING
53705 struct ftrace_event_call **trace_events;
53706 unsigned int num_trace_events;
53707 + struct file_operations trace_id;
53708 + struct file_operations trace_enable;
53709 + struct file_operations trace_format;
53710 + struct file_operations trace_filter;
53711 #endif
53712 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53713 unsigned long *ftrace_callsites;
53714 @@ -441,16 +443,46 @@ bool is_module_address(unsigned long add
53715 bool is_module_percpu_address(unsigned long addr);
53716 bool is_module_text_address(unsigned long addr);
53717
53718 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53719 +{
53720 +
53721 +#ifdef CONFIG_PAX_KERNEXEC
53722 + if (ktla_ktva(addr) >= (unsigned long)start &&
53723 + ktla_ktva(addr) < (unsigned long)start + size)
53724 + return 1;
53725 +#endif
53726 +
53727 + return ((void *)addr >= start && (void *)addr < start + size);
53728 +}
53729 +
53730 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53731 +{
53732 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53733 +}
53734 +
53735 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53736 +{
53737 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53738 +}
53739 +
53740 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53741 +{
53742 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53743 +}
53744 +
53745 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53746 +{
53747 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53748 +}
53749 +
53750 static inline int within_module_core(unsigned long addr, struct module *mod)
53751 {
53752 - return (unsigned long)mod->module_core <= addr &&
53753 - addr < (unsigned long)mod->module_core + mod->core_size;
53754 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53755 }
53756
53757 static inline int within_module_init(unsigned long addr, struct module *mod)
53758 {
53759 - return (unsigned long)mod->module_init <= addr &&
53760 - addr < (unsigned long)mod->module_init + mod->init_size;
53761 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53762 }
53763
53764 /* Search for module by name: must hold module_mutex. */
53765 diff -urNp linux-2.6.39.4/include/linux/moduleloader.h linux-2.6.39.4/include/linux/moduleloader.h
53766 --- linux-2.6.39.4/include/linux/moduleloader.h 2011-05-19 00:06:34.000000000 -0400
53767 +++ linux-2.6.39.4/include/linux/moduleloader.h 2011-08-05 19:44:37.000000000 -0400
53768 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53769 sections. Returns NULL on failure. */
53770 void *module_alloc(unsigned long size);
53771
53772 +#ifdef CONFIG_PAX_KERNEXEC
53773 +void *module_alloc_exec(unsigned long size);
53774 +#else
53775 +#define module_alloc_exec(x) module_alloc(x)
53776 +#endif
53777 +
53778 /* Free memory returned from module_alloc. */
53779 void module_free(struct module *mod, void *module_region);
53780
53781 +#ifdef CONFIG_PAX_KERNEXEC
53782 +void module_free_exec(struct module *mod, void *module_region);
53783 +#else
53784 +#define module_free_exec(x, y) module_free((x), (y))
53785 +#endif
53786 +
53787 /* Apply the given relocation to the (simplified) ELF. Return -error
53788 or 0. */
53789 int apply_relocate(Elf_Shdr *sechdrs,
53790 diff -urNp linux-2.6.39.4/include/linux/moduleparam.h linux-2.6.39.4/include/linux/moduleparam.h
53791 --- linux-2.6.39.4/include/linux/moduleparam.h 2011-05-19 00:06:34.000000000 -0400
53792 +++ linux-2.6.39.4/include/linux/moduleparam.h 2011-08-05 20:34:06.000000000 -0400
53793 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53794 * @len is usually just sizeof(string).
53795 */
53796 #define module_param_string(name, string, len, perm) \
53797 - static const struct kparam_string __param_string_##name \
53798 + static const struct kparam_string __param_string_##name __used \
53799 = { len, string }; \
53800 __module_param_call(MODULE_PARAM_PREFIX, name, \
53801 &param_ops_string, \
53802 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53803 * module_param_named() for why this might be necessary.
53804 */
53805 #define module_param_array_named(name, array, type, nump, perm) \
53806 - static const struct kparam_array __param_arr_##name \
53807 + static const struct kparam_array __param_arr_##name __used \
53808 = { ARRAY_SIZE(array), nump, &param_ops_##type, \
53809 sizeof(array[0]), array }; \
53810 __module_param_call(MODULE_PARAM_PREFIX, name, \
53811 diff -urNp linux-2.6.39.4/include/linux/mutex.h linux-2.6.39.4/include/linux/mutex.h
53812 --- linux-2.6.39.4/include/linux/mutex.h 2011-05-19 00:06:34.000000000 -0400
53813 +++ linux-2.6.39.4/include/linux/mutex.h 2011-08-05 19:44:37.000000000 -0400
53814 @@ -51,7 +51,7 @@ struct mutex {
53815 spinlock_t wait_lock;
53816 struct list_head wait_list;
53817 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
53818 - struct thread_info *owner;
53819 + struct task_struct *owner;
53820 #endif
53821 #ifdef CONFIG_DEBUG_MUTEXES
53822 const char *name;
53823 diff -urNp linux-2.6.39.4/include/linux/namei.h linux-2.6.39.4/include/linux/namei.h
53824 --- linux-2.6.39.4/include/linux/namei.h 2011-05-19 00:06:34.000000000 -0400
53825 +++ linux-2.6.39.4/include/linux/namei.h 2011-08-05 19:44:37.000000000 -0400
53826 @@ -24,7 +24,7 @@ struct nameidata {
53827 unsigned seq;
53828 int last_type;
53829 unsigned depth;
53830 - char *saved_names[MAX_NESTED_LINKS + 1];
53831 + const char *saved_names[MAX_NESTED_LINKS + 1];
53832
53833 /* Intent data */
53834 union {
53835 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53836 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53837 extern void unlock_rename(struct dentry *, struct dentry *);
53838
53839 -static inline void nd_set_link(struct nameidata *nd, char *path)
53840 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53841 {
53842 nd->saved_names[nd->depth] = path;
53843 }
53844
53845 -static inline char *nd_get_link(struct nameidata *nd)
53846 +static inline const char *nd_get_link(const struct nameidata *nd)
53847 {
53848 return nd->saved_names[nd->depth];
53849 }
53850 diff -urNp linux-2.6.39.4/include/linux/netdevice.h linux-2.6.39.4/include/linux/netdevice.h
53851 --- linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:11:51.000000000 -0400
53852 +++ linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:12:20.000000000 -0400
53853 @@ -979,6 +979,7 @@ struct net_device_ops {
53854 int (*ndo_set_features)(struct net_device *dev,
53855 u32 features);
53856 };
53857 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53858
53859 /*
53860 * The DEVICE structure.
53861 diff -urNp linux-2.6.39.4/include/linux/netfilter/xt_gradm.h linux-2.6.39.4/include/linux/netfilter/xt_gradm.h
53862 --- linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53863 +++ linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 2011-08-05 19:44:37.000000000 -0400
53864 @@ -0,0 +1,9 @@
53865 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53866 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53867 +
53868 +struct xt_gradm_mtinfo {
53869 + __u16 flags;
53870 + __u16 invflags;
53871 +};
53872 +
53873 +#endif
53874 diff -urNp linux-2.6.39.4/include/linux/oprofile.h linux-2.6.39.4/include/linux/oprofile.h
53875 --- linux-2.6.39.4/include/linux/oprofile.h 2011-05-19 00:06:34.000000000 -0400
53876 +++ linux-2.6.39.4/include/linux/oprofile.h 2011-08-05 19:44:37.000000000 -0400
53877 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53878 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53879 char const * name, ulong * val);
53880
53881 -/** Create a file for read-only access to an atomic_t. */
53882 +/** Create a file for read-only access to an atomic_unchecked_t. */
53883 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53884 - char const * name, atomic_t * val);
53885 + char const * name, atomic_unchecked_t * val);
53886
53887 /** create a directory */
53888 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53889 diff -urNp linux-2.6.39.4/include/linux/padata.h linux-2.6.39.4/include/linux/padata.h
53890 --- linux-2.6.39.4/include/linux/padata.h 2011-05-19 00:06:34.000000000 -0400
53891 +++ linux-2.6.39.4/include/linux/padata.h 2011-08-05 19:44:37.000000000 -0400
53892 @@ -129,7 +129,7 @@ struct parallel_data {
53893 struct padata_instance *pinst;
53894 struct padata_parallel_queue __percpu *pqueue;
53895 struct padata_serial_queue __percpu *squeue;
53896 - atomic_t seq_nr;
53897 + atomic_unchecked_t seq_nr;
53898 atomic_t reorder_objects;
53899 atomic_t refcnt;
53900 unsigned int max_seq_nr;
53901 diff -urNp linux-2.6.39.4/include/linux/perf_event.h linux-2.6.39.4/include/linux/perf_event.h
53902 --- linux-2.6.39.4/include/linux/perf_event.h 2011-05-19 00:06:34.000000000 -0400
53903 +++ linux-2.6.39.4/include/linux/perf_event.h 2011-08-05 20:34:06.000000000 -0400
53904 @@ -759,8 +759,8 @@ struct perf_event {
53905
53906 enum perf_event_active_state state;
53907 unsigned int attach_state;
53908 - local64_t count;
53909 - atomic64_t child_count;
53910 + local64_t count; /* PaX: fix it one day */
53911 + atomic64_unchecked_t child_count;
53912
53913 /*
53914 * These are the total time in nanoseconds that the event
53915 @@ -811,8 +811,8 @@ struct perf_event {
53916 * These accumulate total time (in nanoseconds) that children
53917 * events have been enabled and running, respectively.
53918 */
53919 - atomic64_t child_total_time_enabled;
53920 - atomic64_t child_total_time_running;
53921 + atomic64_unchecked_t child_total_time_enabled;
53922 + atomic64_unchecked_t child_total_time_running;
53923
53924 /*
53925 * Protect attach/detach and child_list:
53926 diff -urNp linux-2.6.39.4/include/linux/pipe_fs_i.h linux-2.6.39.4/include/linux/pipe_fs_i.h
53927 --- linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-05-19 00:06:34.000000000 -0400
53928 +++ linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-08-05 19:44:37.000000000 -0400
53929 @@ -46,9 +46,9 @@ struct pipe_buffer {
53930 struct pipe_inode_info {
53931 wait_queue_head_t wait;
53932 unsigned int nrbufs, curbuf, buffers;
53933 - unsigned int readers;
53934 - unsigned int writers;
53935 - unsigned int waiting_writers;
53936 + atomic_t readers;
53937 + atomic_t writers;
53938 + atomic_t waiting_writers;
53939 unsigned int r_counter;
53940 unsigned int w_counter;
53941 struct page *tmp_page;
53942 diff -urNp linux-2.6.39.4/include/linux/pm_runtime.h linux-2.6.39.4/include/linux/pm_runtime.h
53943 --- linux-2.6.39.4/include/linux/pm_runtime.h 2011-05-19 00:06:34.000000000 -0400
53944 +++ linux-2.6.39.4/include/linux/pm_runtime.h 2011-08-05 19:44:37.000000000 -0400
53945 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
53946
53947 static inline void pm_runtime_mark_last_busy(struct device *dev)
53948 {
53949 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
53950 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
53951 }
53952
53953 #else /* !CONFIG_PM_RUNTIME */
53954 diff -urNp linux-2.6.39.4/include/linux/poison.h linux-2.6.39.4/include/linux/poison.h
53955 --- linux-2.6.39.4/include/linux/poison.h 2011-05-19 00:06:34.000000000 -0400
53956 +++ linux-2.6.39.4/include/linux/poison.h 2011-08-05 19:44:37.000000000 -0400
53957 @@ -19,8 +19,8 @@
53958 * under normal circumstances, used to verify that nobody uses
53959 * non-initialized list entries.
53960 */
53961 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
53962 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
53963 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
53964 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
53965
53966 /********** include/linux/timer.h **********/
53967 /*
53968 diff -urNp linux-2.6.39.4/include/linux/preempt.h linux-2.6.39.4/include/linux/preempt.h
53969 --- linux-2.6.39.4/include/linux/preempt.h 2011-05-19 00:06:34.000000000 -0400
53970 +++ linux-2.6.39.4/include/linux/preempt.h 2011-08-05 20:34:06.000000000 -0400
53971 @@ -115,7 +115,7 @@ struct preempt_ops {
53972 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
53973 void (*sched_out)(struct preempt_notifier *notifier,
53974 struct task_struct *next);
53975 -};
53976 +} __no_const;
53977
53978 /**
53979 * preempt_notifier - key for installing preemption notifiers
53980 diff -urNp linux-2.6.39.4/include/linux/proc_fs.h linux-2.6.39.4/include/linux/proc_fs.h
53981 --- linux-2.6.39.4/include/linux/proc_fs.h 2011-05-19 00:06:34.000000000 -0400
53982 +++ linux-2.6.39.4/include/linux/proc_fs.h 2011-08-05 20:34:06.000000000 -0400
53983 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
53984 return proc_create_data(name, mode, parent, proc_fops, NULL);
53985 }
53986
53987 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
53988 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
53989 +{
53990 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53991 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
53992 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53993 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
53994 +#else
53995 + return proc_create_data(name, mode, parent, proc_fops, NULL);
53996 +#endif
53997 +}
53998 +
53999 +
54000 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
54001 mode_t mode, struct proc_dir_entry *base,
54002 read_proc_t *read_proc, void * data)
54003 @@ -258,7 +271,7 @@ union proc_op {
54004 int (*proc_show)(struct seq_file *m,
54005 struct pid_namespace *ns, struct pid *pid,
54006 struct task_struct *task);
54007 -};
54008 +} __no_const;
54009
54010 struct ctl_table_header;
54011 struct ctl_table;
54012 diff -urNp linux-2.6.39.4/include/linux/ptrace.h linux-2.6.39.4/include/linux/ptrace.h
54013 --- linux-2.6.39.4/include/linux/ptrace.h 2011-05-19 00:06:34.000000000 -0400
54014 +++ linux-2.6.39.4/include/linux/ptrace.h 2011-08-05 19:44:37.000000000 -0400
54015 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
54016 extern void exit_ptrace(struct task_struct *tracer);
54017 #define PTRACE_MODE_READ 1
54018 #define PTRACE_MODE_ATTACH 2
54019 -/* Returns 0 on success, -errno on denial. */
54020 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
54021 /* Returns true on success, false on denial. */
54022 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
54023 +/* Returns true on success, false on denial. */
54024 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
54025
54026 static inline int ptrace_reparented(struct task_struct *child)
54027 {
54028 diff -urNp linux-2.6.39.4/include/linux/random.h linux-2.6.39.4/include/linux/random.h
54029 --- linux-2.6.39.4/include/linux/random.h 2011-05-19 00:06:34.000000000 -0400
54030 +++ linux-2.6.39.4/include/linux/random.h 2011-08-05 19:44:37.000000000 -0400
54031 @@ -80,12 +80,17 @@ void srandom32(u32 seed);
54032
54033 u32 prandom32(struct rnd_state *);
54034
54035 +static inline unsigned long pax_get_random_long(void)
54036 +{
54037 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
54038 +}
54039 +
54040 /*
54041 * Handle minimum values for seeds
54042 */
54043 static inline u32 __seed(u32 x, u32 m)
54044 {
54045 - return (x < m) ? x + m : x;
54046 + return (x <= m) ? x + m + 1 : x;
54047 }
54048
54049 /**
54050 diff -urNp linux-2.6.39.4/include/linux/reboot.h linux-2.6.39.4/include/linux/reboot.h
54051 --- linux-2.6.39.4/include/linux/reboot.h 2011-05-19 00:06:34.000000000 -0400
54052 +++ linux-2.6.39.4/include/linux/reboot.h 2011-08-05 19:44:37.000000000 -0400
54053 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
54054 * Architecture-specific implementations of sys_reboot commands.
54055 */
54056
54057 -extern void machine_restart(char *cmd);
54058 -extern void machine_halt(void);
54059 -extern void machine_power_off(void);
54060 +extern void machine_restart(char *cmd) __noreturn;
54061 +extern void machine_halt(void) __noreturn;
54062 +extern void machine_power_off(void) __noreturn;
54063
54064 extern void machine_shutdown(void);
54065 struct pt_regs;
54066 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
54067 */
54068
54069 extern void kernel_restart_prepare(char *cmd);
54070 -extern void kernel_restart(char *cmd);
54071 -extern void kernel_halt(void);
54072 -extern void kernel_power_off(void);
54073 +extern void kernel_restart(char *cmd) __noreturn;
54074 +extern void kernel_halt(void) __noreturn;
54075 +extern void kernel_power_off(void) __noreturn;
54076
54077 extern int C_A_D; /* for sysctl */
54078 void ctrl_alt_del(void);
54079 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
54080 * Emergency restart, callable from an interrupt handler.
54081 */
54082
54083 -extern void emergency_restart(void);
54084 +extern void emergency_restart(void) __noreturn;
54085 #include <asm/emergency-restart.h>
54086
54087 #endif
54088 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs.h linux-2.6.39.4/include/linux/reiserfs_fs.h
54089 --- linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-05-19 00:06:34.000000000 -0400
54090 +++ linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-08-05 20:34:06.000000000 -0400
54091 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
54092 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
54093
54094 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
54095 -#define get_generation(s) atomic_read (&fs_generation(s))
54096 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
54097 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
54098 #define __fs_changed(gen,s) (gen != get_generation (s))
54099 #define fs_changed(gen,s) \
54100 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs_sb.h linux-2.6.39.4/include/linux/reiserfs_fs_sb.h
54101 --- linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-05-19 00:06:34.000000000 -0400
54102 +++ linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-08-05 19:44:37.000000000 -0400
54103 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
54104 /* Comment? -Hans */
54105 wait_queue_head_t s_wait;
54106 /* To be obsoleted soon by per buffer seals.. -Hans */
54107 - atomic_t s_generation_counter; // increased by one every time the
54108 + atomic_unchecked_t s_generation_counter; // increased by one every time the
54109 // tree gets re-balanced
54110 unsigned long s_properties; /* File system properties. Currently holds
54111 on-disk FS format */
54112 diff -urNp linux-2.6.39.4/include/linux/relay.h linux-2.6.39.4/include/linux/relay.h
54113 --- linux-2.6.39.4/include/linux/relay.h 2011-05-19 00:06:34.000000000 -0400
54114 +++ linux-2.6.39.4/include/linux/relay.h 2011-08-05 20:34:06.000000000 -0400
54115 @@ -159,7 +159,7 @@ struct rchan_callbacks
54116 * The callback should return 0 if successful, negative if not.
54117 */
54118 int (*remove_buf_file)(struct dentry *dentry);
54119 -};
54120 +} __no_const;
54121
54122 /*
54123 * CONFIG_RELAY kernel API, kernel/relay.c
54124 diff -urNp linux-2.6.39.4/include/linux/rfkill.h linux-2.6.39.4/include/linux/rfkill.h
54125 --- linux-2.6.39.4/include/linux/rfkill.h 2011-05-19 00:06:34.000000000 -0400
54126 +++ linux-2.6.39.4/include/linux/rfkill.h 2011-08-05 20:34:06.000000000 -0400
54127 @@ -147,6 +147,7 @@ struct rfkill_ops {
54128 void (*query)(struct rfkill *rfkill, void *data);
54129 int (*set_block)(void *data, bool blocked);
54130 };
54131 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54132
54133 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54134 /**
54135 diff -urNp linux-2.6.39.4/include/linux/rmap.h linux-2.6.39.4/include/linux/rmap.h
54136 --- linux-2.6.39.4/include/linux/rmap.h 2011-05-19 00:06:34.000000000 -0400
54137 +++ linux-2.6.39.4/include/linux/rmap.h 2011-08-05 19:44:37.000000000 -0400
54138 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54139 void anon_vma_init(void); /* create anon_vma_cachep */
54140 int anon_vma_prepare(struct vm_area_struct *);
54141 void unlink_anon_vmas(struct vm_area_struct *);
54142 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54143 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54144 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54145 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54146 void __anon_vma_link(struct vm_area_struct *);
54147
54148 static inline void anon_vma_merge(struct vm_area_struct *vma,
54149 diff -urNp linux-2.6.39.4/include/linux/sched.h linux-2.6.39.4/include/linux/sched.h
54150 --- linux-2.6.39.4/include/linux/sched.h 2011-05-19 00:06:34.000000000 -0400
54151 +++ linux-2.6.39.4/include/linux/sched.h 2011-08-05 20:34:06.000000000 -0400
54152 @@ -100,6 +100,7 @@ struct bio_list;
54153 struct fs_struct;
54154 struct perf_event_context;
54155 struct blk_plug;
54156 +struct linux_binprm;
54157
54158 /*
54159 * List of flags we want to share for kernel threads,
54160 @@ -360,7 +361,7 @@ extern signed long schedule_timeout_inte
54161 extern signed long schedule_timeout_killable(signed long timeout);
54162 extern signed long schedule_timeout_uninterruptible(signed long timeout);
54163 asmlinkage void schedule(void);
54164 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
54165 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
54166
54167 struct nsproxy;
54168 struct user_namespace;
54169 @@ -381,10 +382,13 @@ struct user_namespace;
54170 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54171
54172 extern int sysctl_max_map_count;
54173 +extern unsigned long sysctl_heap_stack_gap;
54174
54175 #include <linux/aio.h>
54176
54177 #ifdef CONFIG_MMU
54178 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54179 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54180 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54181 extern unsigned long
54182 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54183 @@ -629,6 +633,17 @@ struct signal_struct {
54184 #ifdef CONFIG_TASKSTATS
54185 struct taskstats *stats;
54186 #endif
54187 +
54188 +#ifdef CONFIG_GRKERNSEC
54189 + u32 curr_ip;
54190 + u32 saved_ip;
54191 + u32 gr_saddr;
54192 + u32 gr_daddr;
54193 + u16 gr_sport;
54194 + u16 gr_dport;
54195 + u8 used_accept:1;
54196 +#endif
54197 +
54198 #ifdef CONFIG_AUDIT
54199 unsigned audit_tty;
54200 struct tty_audit_buf *tty_audit_buf;
54201 @@ -701,6 +716,11 @@ struct user_struct {
54202 struct key *session_keyring; /* UID's default session keyring */
54203 #endif
54204
54205 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54206 + unsigned int banned;
54207 + unsigned long ban_expires;
54208 +#endif
54209 +
54210 /* Hash table maintenance information */
54211 struct hlist_node uidhash_node;
54212 uid_t uid;
54213 @@ -1310,8 +1330,8 @@ struct task_struct {
54214 struct list_head thread_group;
54215
54216 struct completion *vfork_done; /* for vfork() */
54217 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54218 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54219 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54220 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54221
54222 cputime_t utime, stime, utimescaled, stimescaled;
54223 cputime_t gtime;
54224 @@ -1327,13 +1347,6 @@ struct task_struct {
54225 struct task_cputime cputime_expires;
54226 struct list_head cpu_timers[3];
54227
54228 -/* process credentials */
54229 - const struct cred __rcu *real_cred; /* objective and real subjective task
54230 - * credentials (COW) */
54231 - const struct cred __rcu *cred; /* effective (overridable) subjective task
54232 - * credentials (COW) */
54233 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54234 -
54235 char comm[TASK_COMM_LEN]; /* executable name excluding path
54236 - access with [gs]et_task_comm (which lock
54237 it with task_lock())
54238 @@ -1350,8 +1363,16 @@ struct task_struct {
54239 #endif
54240 /* CPU-specific state of this task */
54241 struct thread_struct thread;
54242 +/* thread_info moved to task_struct */
54243 +#ifdef CONFIG_X86
54244 + struct thread_info tinfo;
54245 +#endif
54246 /* filesystem information */
54247 struct fs_struct *fs;
54248 +
54249 + const struct cred __rcu *cred; /* effective (overridable) subjective task
54250 + * credentials (COW) */
54251 +
54252 /* open file information */
54253 struct files_struct *files;
54254 /* namespaces */
54255 @@ -1398,6 +1419,11 @@ struct task_struct {
54256 struct rt_mutex_waiter *pi_blocked_on;
54257 #endif
54258
54259 +/* process credentials */
54260 + const struct cred __rcu *real_cred; /* objective and real subjective task
54261 + * credentials (COW) */
54262 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54263 +
54264 #ifdef CONFIG_DEBUG_MUTEXES
54265 /* mutex deadlock detection */
54266 struct mutex_waiter *blocked_on;
54267 @@ -1508,6 +1534,21 @@ struct task_struct {
54268 unsigned long default_timer_slack_ns;
54269
54270 struct list_head *scm_work_list;
54271 +
54272 +#ifdef CONFIG_GRKERNSEC
54273 + /* grsecurity */
54274 + struct dentry *gr_chroot_dentry;
54275 + struct acl_subject_label *acl;
54276 + struct acl_role_label *role;
54277 + struct file *exec_file;
54278 + u16 acl_role_id;
54279 + /* is this the task that authenticated to the special role */
54280 + u8 acl_sp_role;
54281 + u8 is_writable;
54282 + u8 brute;
54283 + u8 gr_is_chrooted;
54284 +#endif
54285 +
54286 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54287 /* Index of current stored address in ret_stack */
54288 int curr_ret_stack;
54289 @@ -1542,6 +1583,57 @@ struct task_struct {
54290 #endif
54291 };
54292
54293 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54294 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54295 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54296 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54297 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54298 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54299 +
54300 +#ifdef CONFIG_PAX_SOFTMODE
54301 +extern int pax_softmode;
54302 +#endif
54303 +
54304 +extern int pax_check_flags(unsigned long *);
54305 +
54306 +/* if tsk != current then task_lock must be held on it */
54307 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54308 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
54309 +{
54310 + if (likely(tsk->mm))
54311 + return tsk->mm->pax_flags;
54312 + else
54313 + return 0UL;
54314 +}
54315 +
54316 +/* if tsk != current then task_lock must be held on it */
54317 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54318 +{
54319 + if (likely(tsk->mm)) {
54320 + tsk->mm->pax_flags = flags;
54321 + return 0;
54322 + }
54323 + return -EINVAL;
54324 +}
54325 +#endif
54326 +
54327 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54328 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
54329 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54330 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54331 +#endif
54332 +
54333 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54334 +extern void pax_report_insns(void *pc, void *sp);
54335 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
54336 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54337 +
54338 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54339 +extern void pax_track_stack(void);
54340 +#else
54341 +static inline void pax_track_stack(void) {}
54342 +#endif
54343 +
54344 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54345 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54346
54347 @@ -2009,7 +2101,9 @@ void yield(void);
54348 extern struct exec_domain default_exec_domain;
54349
54350 union thread_union {
54351 +#ifndef CONFIG_X86
54352 struct thread_info thread_info;
54353 +#endif
54354 unsigned long stack[THREAD_SIZE/sizeof(long)];
54355 };
54356
54357 @@ -2042,6 +2136,7 @@ extern struct pid_namespace init_pid_ns;
54358 */
54359
54360 extern struct task_struct *find_task_by_vpid(pid_t nr);
54361 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54362 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54363 struct pid_namespace *ns);
54364
54365 @@ -2179,7 +2274,7 @@ extern void __cleanup_sighand(struct sig
54366 extern void exit_itimers(struct signal_struct *);
54367 extern void flush_itimer_signals(void);
54368
54369 -extern NORET_TYPE void do_group_exit(int);
54370 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54371
54372 extern void daemonize(const char *, ...);
54373 extern int allow_signal(int);
54374 @@ -2320,13 +2415,17 @@ static inline unsigned long *end_of_stac
54375
54376 #endif
54377
54378 -static inline int object_is_on_stack(void *obj)
54379 +static inline int object_starts_on_stack(void *obj)
54380 {
54381 - void *stack = task_stack_page(current);
54382 + const void *stack = task_stack_page(current);
54383
54384 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54385 }
54386
54387 +#ifdef CONFIG_PAX_USERCOPY
54388 +extern int object_is_on_stack(const void *obj, unsigned long len);
54389 +#endif
54390 +
54391 extern void thread_info_cache_init(void);
54392
54393 #ifdef CONFIG_DEBUG_STACK_USAGE
54394 diff -urNp linux-2.6.39.4/include/linux/screen_info.h linux-2.6.39.4/include/linux/screen_info.h
54395 --- linux-2.6.39.4/include/linux/screen_info.h 2011-05-19 00:06:34.000000000 -0400
54396 +++ linux-2.6.39.4/include/linux/screen_info.h 2011-08-05 19:44:37.000000000 -0400
54397 @@ -43,7 +43,8 @@ struct screen_info {
54398 __u16 pages; /* 0x32 */
54399 __u16 vesa_attributes; /* 0x34 */
54400 __u32 capabilities; /* 0x36 */
54401 - __u8 _reserved[6]; /* 0x3a */
54402 + __u16 vesapm_size; /* 0x3a */
54403 + __u8 _reserved[4]; /* 0x3c */
54404 } __attribute__((packed));
54405
54406 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54407 diff -urNp linux-2.6.39.4/include/linux/security.h linux-2.6.39.4/include/linux/security.h
54408 --- linux-2.6.39.4/include/linux/security.h 2011-05-19 00:06:34.000000000 -0400
54409 +++ linux-2.6.39.4/include/linux/security.h 2011-08-05 19:44:37.000000000 -0400
54410 @@ -36,6 +36,7 @@
54411 #include <linux/key.h>
54412 #include <linux/xfrm.h>
54413 #include <linux/slab.h>
54414 +#include <linux/grsecurity.h>
54415 #include <net/flow.h>
54416
54417 /* Maximum number of letters for an LSM name string */
54418 diff -urNp linux-2.6.39.4/include/linux/seq_file.h linux-2.6.39.4/include/linux/seq_file.h
54419 --- linux-2.6.39.4/include/linux/seq_file.h 2011-05-19 00:06:34.000000000 -0400
54420 +++ linux-2.6.39.4/include/linux/seq_file.h 2011-08-05 20:34:06.000000000 -0400
54421 @@ -32,6 +32,7 @@ struct seq_operations {
54422 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54423 int (*show) (struct seq_file *m, void *v);
54424 };
54425 +typedef struct seq_operations __no_const seq_operations_no_const;
54426
54427 #define SEQ_SKIP 1
54428
54429 diff -urNp linux-2.6.39.4/include/linux/shm.h linux-2.6.39.4/include/linux/shm.h
54430 --- linux-2.6.39.4/include/linux/shm.h 2011-05-19 00:06:34.000000000 -0400
54431 +++ linux-2.6.39.4/include/linux/shm.h 2011-08-05 19:44:37.000000000 -0400
54432 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54433 pid_t shm_cprid;
54434 pid_t shm_lprid;
54435 struct user_struct *mlock_user;
54436 +#ifdef CONFIG_GRKERNSEC
54437 + time_t shm_createtime;
54438 + pid_t shm_lapid;
54439 +#endif
54440 };
54441
54442 /* shm_mode upper byte flags */
54443 diff -urNp linux-2.6.39.4/include/linux/skbuff.h linux-2.6.39.4/include/linux/skbuff.h
54444 --- linux-2.6.39.4/include/linux/skbuff.h 2011-05-19 00:06:34.000000000 -0400
54445 +++ linux-2.6.39.4/include/linux/skbuff.h 2011-08-05 19:44:37.000000000 -0400
54446 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54447 */
54448 static inline int skb_queue_empty(const struct sk_buff_head *list)
54449 {
54450 - return list->next == (struct sk_buff *)list;
54451 + return list->next == (const struct sk_buff *)list;
54452 }
54453
54454 /**
54455 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54456 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54457 const struct sk_buff *skb)
54458 {
54459 - return skb->next == (struct sk_buff *)list;
54460 + return skb->next == (const struct sk_buff *)list;
54461 }
54462
54463 /**
54464 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54465 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54466 const struct sk_buff *skb)
54467 {
54468 - return skb->prev == (struct sk_buff *)list;
54469 + return skb->prev == (const struct sk_buff *)list;
54470 }
54471
54472 /**
54473 @@ -1435,7 +1435,7 @@ static inline int pskb_network_may_pull(
54474 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54475 */
54476 #ifndef NET_SKB_PAD
54477 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54478 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54479 #endif
54480
54481 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54482 diff -urNp linux-2.6.39.4/include/linux/slab_def.h linux-2.6.39.4/include/linux/slab_def.h
54483 --- linux-2.6.39.4/include/linux/slab_def.h 2011-05-19 00:06:34.000000000 -0400
54484 +++ linux-2.6.39.4/include/linux/slab_def.h 2011-08-05 19:44:37.000000000 -0400
54485 @@ -96,10 +96,10 @@ struct kmem_cache {
54486 unsigned long node_allocs;
54487 unsigned long node_frees;
54488 unsigned long node_overflow;
54489 - atomic_t allochit;
54490 - atomic_t allocmiss;
54491 - atomic_t freehit;
54492 - atomic_t freemiss;
54493 + atomic_unchecked_t allochit;
54494 + atomic_unchecked_t allocmiss;
54495 + atomic_unchecked_t freehit;
54496 + atomic_unchecked_t freemiss;
54497
54498 /*
54499 * If debugging is enabled, then the allocator can add additional
54500 diff -urNp linux-2.6.39.4/include/linux/slab.h linux-2.6.39.4/include/linux/slab.h
54501 --- linux-2.6.39.4/include/linux/slab.h 2011-05-19 00:06:34.000000000 -0400
54502 +++ linux-2.6.39.4/include/linux/slab.h 2011-08-05 19:44:37.000000000 -0400
54503 @@ -11,12 +11,20 @@
54504
54505 #include <linux/gfp.h>
54506 #include <linux/types.h>
54507 +#include <linux/err.h>
54508
54509 /*
54510 * Flags to pass to kmem_cache_create().
54511 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54512 */
54513 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54514 +
54515 +#ifdef CONFIG_PAX_USERCOPY
54516 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54517 +#else
54518 +#define SLAB_USERCOPY 0x00000000UL
54519 +#endif
54520 +
54521 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54522 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54523 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54524 @@ -87,10 +95,13 @@
54525 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54526 * Both make kfree a no-op.
54527 */
54528 -#define ZERO_SIZE_PTR ((void *)16)
54529 +#define ZERO_SIZE_PTR \
54530 +({ \
54531 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54532 + (void *)(-MAX_ERRNO-1L); \
54533 +})
54534
54535 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54536 - (unsigned long)ZERO_SIZE_PTR)
54537 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54538
54539 /*
54540 * struct kmem_cache related prototypes
54541 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54542 void kfree(const void *);
54543 void kzfree(const void *);
54544 size_t ksize(const void *);
54545 +void check_object_size(const void *ptr, unsigned long n, bool to);
54546
54547 /*
54548 * Allocator specific definitions. These are mainly used to establish optimized
54549 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54550
54551 void __init kmem_cache_init_late(void);
54552
54553 +#define kmalloc(x, y) \
54554 +({ \
54555 + void *___retval; \
54556 + intoverflow_t ___x = (intoverflow_t)x; \
54557 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54558 + ___retval = NULL; \
54559 + else \
54560 + ___retval = kmalloc((size_t)___x, (y)); \
54561 + ___retval; \
54562 +})
54563 +
54564 +#define kmalloc_node(x, y, z) \
54565 +({ \
54566 + void *___retval; \
54567 + intoverflow_t ___x = (intoverflow_t)x; \
54568 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54569 + ___retval = NULL; \
54570 + else \
54571 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54572 + ___retval; \
54573 +})
54574 +
54575 +#define kzalloc(x, y) \
54576 +({ \
54577 + void *___retval; \
54578 + intoverflow_t ___x = (intoverflow_t)x; \
54579 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54580 + ___retval = NULL; \
54581 + else \
54582 + ___retval = kzalloc((size_t)___x, (y)); \
54583 + ___retval; \
54584 +})
54585 +
54586 +#define __krealloc(x, y, z) \
54587 +({ \
54588 + void *___retval; \
54589 + intoverflow_t ___y = (intoverflow_t)y; \
54590 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54591 + ___retval = NULL; \
54592 + else \
54593 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54594 + ___retval; \
54595 +})
54596 +
54597 +#define krealloc(x, y, z) \
54598 +({ \
54599 + void *___retval; \
54600 + intoverflow_t ___y = (intoverflow_t)y; \
54601 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54602 + ___retval = NULL; \
54603 + else \
54604 + ___retval = krealloc((x), (size_t)___y, (z)); \
54605 + ___retval; \
54606 +})
54607 +
54608 #endif /* _LINUX_SLAB_H */
54609 diff -urNp linux-2.6.39.4/include/linux/slub_def.h linux-2.6.39.4/include/linux/slub_def.h
54610 --- linux-2.6.39.4/include/linux/slub_def.h 2011-05-19 00:06:34.000000000 -0400
54611 +++ linux-2.6.39.4/include/linux/slub_def.h 2011-08-05 20:34:06.000000000 -0400
54612 @@ -84,7 +84,7 @@ struct kmem_cache {
54613 struct kmem_cache_order_objects max;
54614 struct kmem_cache_order_objects min;
54615 gfp_t allocflags; /* gfp flags to use on each alloc */
54616 - int refcount; /* Refcount for slab cache destroy */
54617 + atomic_t refcount; /* Refcount for slab cache destroy */
54618 void (*ctor)(void *);
54619 int inuse; /* Offset to metadata */
54620 int align; /* Alignment */
54621 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54622 }
54623
54624 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54625 -void *__kmalloc(size_t size, gfp_t flags);
54626 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54627
54628 static __always_inline void *
54629 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54630 diff -urNp linux-2.6.39.4/include/linux/sonet.h linux-2.6.39.4/include/linux/sonet.h
54631 --- linux-2.6.39.4/include/linux/sonet.h 2011-05-19 00:06:34.000000000 -0400
54632 +++ linux-2.6.39.4/include/linux/sonet.h 2011-08-05 19:44:37.000000000 -0400
54633 @@ -61,7 +61,7 @@ struct sonet_stats {
54634 #include <asm/atomic.h>
54635
54636 struct k_sonet_stats {
54637 -#define __HANDLE_ITEM(i) atomic_t i
54638 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54639 __SONET_ITEMS
54640 #undef __HANDLE_ITEM
54641 };
54642 diff -urNp linux-2.6.39.4/include/linux/sunrpc/clnt.h linux-2.6.39.4/include/linux/sunrpc/clnt.h
54643 --- linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-05-19 00:06:34.000000000 -0400
54644 +++ linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-08-05 19:44:37.000000000 -0400
54645 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54646 {
54647 switch (sap->sa_family) {
54648 case AF_INET:
54649 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54650 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54651 case AF_INET6:
54652 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54653 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54654 }
54655 return 0;
54656 }
54657 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54658 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54659 const struct sockaddr *src)
54660 {
54661 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54662 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54663 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54664
54665 dsin->sin_family = ssin->sin_family;
54666 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54667 if (sa->sa_family != AF_INET6)
54668 return 0;
54669
54670 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54671 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54672 }
54673
54674 #endif /* __KERNEL__ */
54675 diff -urNp linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h
54676 --- linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-05-19 00:06:34.000000000 -0400
54677 +++ linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-08-05 19:44:37.000000000 -0400
54678 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54679 extern unsigned int svcrdma_max_requests;
54680 extern unsigned int svcrdma_max_req_size;
54681
54682 -extern atomic_t rdma_stat_recv;
54683 -extern atomic_t rdma_stat_read;
54684 -extern atomic_t rdma_stat_write;
54685 -extern atomic_t rdma_stat_sq_starve;
54686 -extern atomic_t rdma_stat_rq_starve;
54687 -extern atomic_t rdma_stat_rq_poll;
54688 -extern atomic_t rdma_stat_rq_prod;
54689 -extern atomic_t rdma_stat_sq_poll;
54690 -extern atomic_t rdma_stat_sq_prod;
54691 +extern atomic_unchecked_t rdma_stat_recv;
54692 +extern atomic_unchecked_t rdma_stat_read;
54693 +extern atomic_unchecked_t rdma_stat_write;
54694 +extern atomic_unchecked_t rdma_stat_sq_starve;
54695 +extern atomic_unchecked_t rdma_stat_rq_starve;
54696 +extern atomic_unchecked_t rdma_stat_rq_poll;
54697 +extern atomic_unchecked_t rdma_stat_rq_prod;
54698 +extern atomic_unchecked_t rdma_stat_sq_poll;
54699 +extern atomic_unchecked_t rdma_stat_sq_prod;
54700
54701 #define RPCRDMA_VERSION 1
54702
54703 diff -urNp linux-2.6.39.4/include/linux/sysctl.h linux-2.6.39.4/include/linux/sysctl.h
54704 --- linux-2.6.39.4/include/linux/sysctl.h 2011-05-19 00:06:34.000000000 -0400
54705 +++ linux-2.6.39.4/include/linux/sysctl.h 2011-08-05 19:44:37.000000000 -0400
54706 @@ -155,7 +155,11 @@ enum
54707 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54708 };
54709
54710 -
54711 +#ifdef CONFIG_PAX_SOFTMODE
54712 +enum {
54713 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54714 +};
54715 +#endif
54716
54717 /* CTL_VM names: */
54718 enum
54719 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54720
54721 extern int proc_dostring(struct ctl_table *, int,
54722 void __user *, size_t *, loff_t *);
54723 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54724 + void __user *, size_t *, loff_t *);
54725 extern int proc_dointvec(struct ctl_table *, int,
54726 void __user *, size_t *, loff_t *);
54727 extern int proc_dointvec_minmax(struct ctl_table *, int,
54728 diff -urNp linux-2.6.39.4/include/linux/tty_ldisc.h linux-2.6.39.4/include/linux/tty_ldisc.h
54729 --- linux-2.6.39.4/include/linux/tty_ldisc.h 2011-05-19 00:06:34.000000000 -0400
54730 +++ linux-2.6.39.4/include/linux/tty_ldisc.h 2011-08-05 19:44:37.000000000 -0400
54731 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54732
54733 struct module *owner;
54734
54735 - int refcount;
54736 + atomic_t refcount;
54737 };
54738
54739 struct tty_ldisc {
54740 diff -urNp linux-2.6.39.4/include/linux/types.h linux-2.6.39.4/include/linux/types.h
54741 --- linux-2.6.39.4/include/linux/types.h 2011-05-19 00:06:34.000000000 -0400
54742 +++ linux-2.6.39.4/include/linux/types.h 2011-08-05 19:44:37.000000000 -0400
54743 @@ -213,10 +213,26 @@ typedef struct {
54744 int counter;
54745 } atomic_t;
54746
54747 +#ifdef CONFIG_PAX_REFCOUNT
54748 +typedef struct {
54749 + int counter;
54750 +} atomic_unchecked_t;
54751 +#else
54752 +typedef atomic_t atomic_unchecked_t;
54753 +#endif
54754 +
54755 #ifdef CONFIG_64BIT
54756 typedef struct {
54757 long counter;
54758 } atomic64_t;
54759 +
54760 +#ifdef CONFIG_PAX_REFCOUNT
54761 +typedef struct {
54762 + long counter;
54763 +} atomic64_unchecked_t;
54764 +#else
54765 +typedef atomic64_t atomic64_unchecked_t;
54766 +#endif
54767 #endif
54768
54769 struct list_head {
54770 diff -urNp linux-2.6.39.4/include/linux/uaccess.h linux-2.6.39.4/include/linux/uaccess.h
54771 --- linux-2.6.39.4/include/linux/uaccess.h 2011-05-19 00:06:34.000000000 -0400
54772 +++ linux-2.6.39.4/include/linux/uaccess.h 2011-08-05 19:44:37.000000000 -0400
54773 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54774 long ret; \
54775 mm_segment_t old_fs = get_fs(); \
54776 \
54777 - set_fs(KERNEL_DS); \
54778 pagefault_disable(); \
54779 + set_fs(KERNEL_DS); \
54780 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54781 - pagefault_enable(); \
54782 set_fs(old_fs); \
54783 + pagefault_enable(); \
54784 ret; \
54785 })
54786
54787 @@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
54788 * Safely read from address @src to the buffer at @dst. If a kernel fault
54789 * happens, handle that and return -EFAULT.
54790 */
54791 -extern long probe_kernel_read(void *dst, void *src, size_t size);
54792 -extern long __probe_kernel_read(void *dst, void *src, size_t size);
54793 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
54794 +extern long __probe_kernel_read(void *dst, const void *src, size_t size);
54795
54796 /*
54797 * probe_kernel_write(): safely attempt to write to a location
54798 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
54799 * Safely write to address @dst from the buffer at @src. If a kernel fault
54800 * happens, handle that and return -EFAULT.
54801 */
54802 -extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
54803 -extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
54804 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
54805 +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
54806
54807 #endif /* __LINUX_UACCESS_H__ */
54808 diff -urNp linux-2.6.39.4/include/linux/unaligned/access_ok.h linux-2.6.39.4/include/linux/unaligned/access_ok.h
54809 --- linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-05-19 00:06:34.000000000 -0400
54810 +++ linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-08-05 19:44:37.000000000 -0400
54811 @@ -6,32 +6,32 @@
54812
54813 static inline u16 get_unaligned_le16(const void *p)
54814 {
54815 - return le16_to_cpup((__le16 *)p);
54816 + return le16_to_cpup((const __le16 *)p);
54817 }
54818
54819 static inline u32 get_unaligned_le32(const void *p)
54820 {
54821 - return le32_to_cpup((__le32 *)p);
54822 + return le32_to_cpup((const __le32 *)p);
54823 }
54824
54825 static inline u64 get_unaligned_le64(const void *p)
54826 {
54827 - return le64_to_cpup((__le64 *)p);
54828 + return le64_to_cpup((const __le64 *)p);
54829 }
54830
54831 static inline u16 get_unaligned_be16(const void *p)
54832 {
54833 - return be16_to_cpup((__be16 *)p);
54834 + return be16_to_cpup((const __be16 *)p);
54835 }
54836
54837 static inline u32 get_unaligned_be32(const void *p)
54838 {
54839 - return be32_to_cpup((__be32 *)p);
54840 + return be32_to_cpup((const __be32 *)p);
54841 }
54842
54843 static inline u64 get_unaligned_be64(const void *p)
54844 {
54845 - return be64_to_cpup((__be64 *)p);
54846 + return be64_to_cpup((const __be64 *)p);
54847 }
54848
54849 static inline void put_unaligned_le16(u16 val, void *p)
54850 diff -urNp linux-2.6.39.4/include/linux/vmalloc.h linux-2.6.39.4/include/linux/vmalloc.h
54851 --- linux-2.6.39.4/include/linux/vmalloc.h 2011-05-19 00:06:34.000000000 -0400
54852 +++ linux-2.6.39.4/include/linux/vmalloc.h 2011-08-05 19:44:37.000000000 -0400
54853 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54854 #define VM_MAP 0x00000004 /* vmap()ed pages */
54855 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54856 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54857 +
54858 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54859 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54860 +#endif
54861 +
54862 /* bits [20..32] reserved for arch specific ioremap internals */
54863
54864 /*
54865 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54866 # endif
54867 #endif
54868
54869 +#define vmalloc(x) \
54870 +({ \
54871 + void *___retval; \
54872 + intoverflow_t ___x = (intoverflow_t)x; \
54873 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54874 + ___retval = NULL; \
54875 + else \
54876 + ___retval = vmalloc((unsigned long)___x); \
54877 + ___retval; \
54878 +})
54879 +
54880 +#define vzalloc(x) \
54881 +({ \
54882 + void *___retval; \
54883 + intoverflow_t ___x = (intoverflow_t)x; \
54884 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54885 + ___retval = NULL; \
54886 + else \
54887 + ___retval = vzalloc((unsigned long)___x); \
54888 + ___retval; \
54889 +})
54890 +
54891 +#define __vmalloc(x, y, z) \
54892 +({ \
54893 + void *___retval; \
54894 + intoverflow_t ___x = (intoverflow_t)x; \
54895 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54896 + ___retval = NULL; \
54897 + else \
54898 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54899 + ___retval; \
54900 +})
54901 +
54902 +#define vmalloc_user(x) \
54903 +({ \
54904 + void *___retval; \
54905 + intoverflow_t ___x = (intoverflow_t)x; \
54906 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54907 + ___retval = NULL; \
54908 + else \
54909 + ___retval = vmalloc_user((unsigned long)___x); \
54910 + ___retval; \
54911 +})
54912 +
54913 +#define vmalloc_exec(x) \
54914 +({ \
54915 + void *___retval; \
54916 + intoverflow_t ___x = (intoverflow_t)x; \
54917 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
54918 + ___retval = NULL; \
54919 + else \
54920 + ___retval = vmalloc_exec((unsigned long)___x); \
54921 + ___retval; \
54922 +})
54923 +
54924 +#define vmalloc_node(x, y) \
54925 +({ \
54926 + void *___retval; \
54927 + intoverflow_t ___x = (intoverflow_t)x; \
54928 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
54929 + ___retval = NULL; \
54930 + else \
54931 + ___retval = vmalloc_node((unsigned long)___x, (y));\
54932 + ___retval; \
54933 +})
54934 +
54935 +#define vzalloc_node(x, y) \
54936 +({ \
54937 + void *___retval; \
54938 + intoverflow_t ___x = (intoverflow_t)x; \
54939 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
54940 + ___retval = NULL; \
54941 + else \
54942 + ___retval = vzalloc_node((unsigned long)___x, (y));\
54943 + ___retval; \
54944 +})
54945 +
54946 +#define vmalloc_32(x) \
54947 +({ \
54948 + void *___retval; \
54949 + intoverflow_t ___x = (intoverflow_t)x; \
54950 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
54951 + ___retval = NULL; \
54952 + else \
54953 + ___retval = vmalloc_32((unsigned long)___x); \
54954 + ___retval; \
54955 +})
54956 +
54957 +#define vmalloc_32_user(x) \
54958 +({ \
54959 +void *___retval; \
54960 + intoverflow_t ___x = (intoverflow_t)x; \
54961 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
54962 + ___retval = NULL; \
54963 + else \
54964 + ___retval = vmalloc_32_user((unsigned long)___x);\
54965 + ___retval; \
54966 +})
54967 +
54968 #endif /* _LINUX_VMALLOC_H */
54969 diff -urNp linux-2.6.39.4/include/linux/vmstat.h linux-2.6.39.4/include/linux/vmstat.h
54970 --- linux-2.6.39.4/include/linux/vmstat.h 2011-05-19 00:06:34.000000000 -0400
54971 +++ linux-2.6.39.4/include/linux/vmstat.h 2011-08-05 19:44:37.000000000 -0400
54972 @@ -147,18 +147,18 @@ static inline void vm_events_fold_cpu(in
54973 /*
54974 * Zone based page accounting with per cpu differentials.
54975 */
54976 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54977 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
54978
54979 static inline void zone_page_state_add(long x, struct zone *zone,
54980 enum zone_stat_item item)
54981 {
54982 - atomic_long_add(x, &zone->vm_stat[item]);
54983 - atomic_long_add(x, &vm_stat[item]);
54984 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
54985 + atomic_long_add_unchecked(x, &vm_stat[item]);
54986 }
54987
54988 static inline unsigned long global_page_state(enum zone_stat_item item)
54989 {
54990 - long x = atomic_long_read(&vm_stat[item]);
54991 + long x = atomic_long_read_unchecked(&vm_stat[item]);
54992 #ifdef CONFIG_SMP
54993 if (x < 0)
54994 x = 0;
54995 @@ -169,7 +169,7 @@ static inline unsigned long global_page_
54996 static inline unsigned long zone_page_state(struct zone *zone,
54997 enum zone_stat_item item)
54998 {
54999 - long x = atomic_long_read(&zone->vm_stat[item]);
55000 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55001 #ifdef CONFIG_SMP
55002 if (x < 0)
55003 x = 0;
55004 @@ -186,7 +186,7 @@ static inline unsigned long zone_page_st
55005 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
55006 enum zone_stat_item item)
55007 {
55008 - long x = atomic_long_read(&zone->vm_stat[item]);
55009 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55010
55011 #ifdef CONFIG_SMP
55012 int cpu;
55013 @@ -280,8 +280,8 @@ static inline void __mod_zone_page_state
55014
55015 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
55016 {
55017 - atomic_long_inc(&zone->vm_stat[item]);
55018 - atomic_long_inc(&vm_stat[item]);
55019 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
55020 + atomic_long_inc_unchecked(&vm_stat[item]);
55021 }
55022
55023 static inline void __inc_zone_page_state(struct page *page,
55024 @@ -292,8 +292,8 @@ static inline void __inc_zone_page_state
55025
55026 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
55027 {
55028 - atomic_long_dec(&zone->vm_stat[item]);
55029 - atomic_long_dec(&vm_stat[item]);
55030 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
55031 + atomic_long_dec_unchecked(&vm_stat[item]);
55032 }
55033
55034 static inline void __dec_zone_page_state(struct page *page,
55035 diff -urNp linux-2.6.39.4/include/media/saa7146_vv.h linux-2.6.39.4/include/media/saa7146_vv.h
55036 --- linux-2.6.39.4/include/media/saa7146_vv.h 2011-05-19 00:06:34.000000000 -0400
55037 +++ linux-2.6.39.4/include/media/saa7146_vv.h 2011-08-05 20:34:06.000000000 -0400
55038 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
55039 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
55040
55041 /* the extension can override this */
55042 - struct v4l2_ioctl_ops ops;
55043 + v4l2_ioctl_ops_no_const ops;
55044 /* pointer to the saa7146 core ops */
55045 const struct v4l2_ioctl_ops *core_ops;
55046
55047 diff -urNp linux-2.6.39.4/include/media/v4l2-dev.h linux-2.6.39.4/include/media/v4l2-dev.h
55048 --- linux-2.6.39.4/include/media/v4l2-dev.h 2011-05-19 00:06:34.000000000 -0400
55049 +++ linux-2.6.39.4/include/media/v4l2-dev.h 2011-08-05 20:34:06.000000000 -0400
55050 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
55051
55052
55053 struct v4l2_file_operations {
55054 - struct module *owner;
55055 + struct module * const owner;
55056 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
55057 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
55058 unsigned int (*poll) (struct file *, struct poll_table_struct *);
55059 diff -urNp linux-2.6.39.4/include/media/v4l2-device.h linux-2.6.39.4/include/media/v4l2-device.h
55060 --- linux-2.6.39.4/include/media/v4l2-device.h 2011-05-19 00:06:34.000000000 -0400
55061 +++ linux-2.6.39.4/include/media/v4l2-device.h 2011-08-05 19:44:37.000000000 -0400
55062 @@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
55063 this function returns 0. If the name ends with a digit (e.g. cx18),
55064 then the name will be set to cx18-0 since cx180 looks really odd. */
55065 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
55066 - atomic_t *instance);
55067 + atomic_unchecked_t *instance);
55068
55069 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
55070 Since the parent disappears this ensures that v4l2_dev doesn't have an
55071 diff -urNp linux-2.6.39.4/include/media/v4l2-ioctl.h linux-2.6.39.4/include/media/v4l2-ioctl.h
55072 --- linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-05-19 00:06:34.000000000 -0400
55073 +++ linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-08-05 20:34:06.000000000 -0400
55074 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
55075 long (*vidioc_default) (struct file *file, void *fh,
55076 bool valid_prio, int cmd, void *arg);
55077 };
55078 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
55079
55080
55081 /* v4l debugging and diagnostics */
55082 diff -urNp linux-2.6.39.4/include/net/caif/cfctrl.h linux-2.6.39.4/include/net/caif/cfctrl.h
55083 --- linux-2.6.39.4/include/net/caif/cfctrl.h 2011-05-19 00:06:34.000000000 -0400
55084 +++ linux-2.6.39.4/include/net/caif/cfctrl.h 2011-08-05 20:34:06.000000000 -0400
55085 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
55086 void (*radioset_rsp)(void);
55087 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
55088 struct cflayer *client_layer);
55089 -};
55090 +} __no_const;
55091
55092 /* Link Setup Parameters for CAIF-Links. */
55093 struct cfctrl_link_param {
55094 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
55095 struct cfctrl {
55096 struct cfsrvl serv;
55097 struct cfctrl_rsp res;
55098 - atomic_t req_seq_no;
55099 - atomic_t rsp_seq_no;
55100 + atomic_unchecked_t req_seq_no;
55101 + atomic_unchecked_t rsp_seq_no;
55102 struct list_head list;
55103 /* Protects from simultaneous access to first_req list */
55104 spinlock_t info_list_lock;
55105 diff -urNp linux-2.6.39.4/include/net/flow.h linux-2.6.39.4/include/net/flow.h
55106 --- linux-2.6.39.4/include/net/flow.h 2011-05-19 00:06:34.000000000 -0400
55107 +++ linux-2.6.39.4/include/net/flow.h 2011-08-05 19:44:37.000000000 -0400
55108 @@ -167,6 +167,6 @@ extern struct flow_cache_object *flow_ca
55109 u8 dir, flow_resolve_t resolver, void *ctx);
55110
55111 extern void flow_cache_flush(void);
55112 -extern atomic_t flow_cache_genid;
55113 +extern atomic_unchecked_t flow_cache_genid;
55114
55115 #endif
55116 diff -urNp linux-2.6.39.4/include/net/inetpeer.h linux-2.6.39.4/include/net/inetpeer.h
55117 --- linux-2.6.39.4/include/net/inetpeer.h 2011-05-19 00:06:34.000000000 -0400
55118 +++ linux-2.6.39.4/include/net/inetpeer.h 2011-08-05 19:44:37.000000000 -0400
55119 @@ -43,8 +43,8 @@ struct inet_peer {
55120 */
55121 union {
55122 struct {
55123 - atomic_t rid; /* Frag reception counter */
55124 - atomic_t ip_id_count; /* IP ID for the next packet */
55125 + atomic_unchecked_t rid; /* Frag reception counter */
55126 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
55127 __u32 tcp_ts;
55128 __u32 tcp_ts_stamp;
55129 u32 metrics[RTAX_MAX];
55130 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
55131 {
55132 more++;
55133 inet_peer_refcheck(p);
55134 - return atomic_add_return(more, &p->ip_id_count) - more;
55135 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
55136 }
55137
55138 #endif /* _NET_INETPEER_H */
55139 diff -urNp linux-2.6.39.4/include/net/ip_fib.h linux-2.6.39.4/include/net/ip_fib.h
55140 --- linux-2.6.39.4/include/net/ip_fib.h 2011-05-19 00:06:34.000000000 -0400
55141 +++ linux-2.6.39.4/include/net/ip_fib.h 2011-08-05 19:44:37.000000000 -0400
55142 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55143
55144 #define FIB_RES_SADDR(net, res) \
55145 ((FIB_RES_NH(res).nh_saddr_genid == \
55146 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55147 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55148 FIB_RES_NH(res).nh_saddr : \
55149 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55150 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55151 diff -urNp linux-2.6.39.4/include/net/ip_vs.h linux-2.6.39.4/include/net/ip_vs.h
55152 --- linux-2.6.39.4/include/net/ip_vs.h 2011-07-09 09:18:51.000000000 -0400
55153 +++ linux-2.6.39.4/include/net/ip_vs.h 2011-08-05 19:44:37.000000000 -0400
55154 @@ -512,7 +512,7 @@ struct ip_vs_conn {
55155 struct ip_vs_conn *control; /* Master control connection */
55156 atomic_t n_control; /* Number of controlled ones */
55157 struct ip_vs_dest *dest; /* real server */
55158 - atomic_t in_pkts; /* incoming packet counter */
55159 + atomic_unchecked_t in_pkts; /* incoming packet counter */
55160
55161 /* packet transmitter for different forwarding methods. If it
55162 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55163 @@ -650,7 +650,7 @@ struct ip_vs_dest {
55164 __be16 port; /* port number of the server */
55165 union nf_inet_addr addr; /* IP address of the server */
55166 volatile unsigned flags; /* dest status flags */
55167 - atomic_t conn_flags; /* flags to copy to conn */
55168 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
55169 atomic_t weight; /* server weight */
55170
55171 atomic_t refcnt; /* reference counter */
55172 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_core.h linux-2.6.39.4/include/net/irda/ircomm_core.h
55173 --- linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-05-19 00:06:34.000000000 -0400
55174 +++ linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-08-05 20:34:06.000000000 -0400
55175 @@ -51,7 +51,7 @@ typedef struct {
55176 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55177 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55178 struct ircomm_info *);
55179 -} call_t;
55180 +} __no_const call_t;
55181
55182 struct ircomm_cb {
55183 irda_queue_t queue;
55184 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_tty.h linux-2.6.39.4/include/net/irda/ircomm_tty.h
55185 --- linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-05-19 00:06:34.000000000 -0400
55186 +++ linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-08-05 19:44:37.000000000 -0400
55187 @@ -35,6 +35,7 @@
55188 #include <linux/termios.h>
55189 #include <linux/timer.h>
55190 #include <linux/tty.h> /* struct tty_struct */
55191 +#include <asm/local.h>
55192
55193 #include <net/irda/irias_object.h>
55194 #include <net/irda/ircomm_core.h>
55195 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55196 unsigned short close_delay;
55197 unsigned short closing_wait; /* time to wait before closing */
55198
55199 - int open_count;
55200 - int blocked_open; /* # of blocked opens */
55201 + local_t open_count;
55202 + local_t blocked_open; /* # of blocked opens */
55203
55204 /* Protect concurent access to :
55205 * o self->open_count
55206 diff -urNp linux-2.6.39.4/include/net/iucv/af_iucv.h linux-2.6.39.4/include/net/iucv/af_iucv.h
55207 --- linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-05-19 00:06:34.000000000 -0400
55208 +++ linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-08-05 19:44:37.000000000 -0400
55209 @@ -87,7 +87,7 @@ struct iucv_sock {
55210 struct iucv_sock_list {
55211 struct hlist_head head;
55212 rwlock_t lock;
55213 - atomic_t autobind_name;
55214 + atomic_unchecked_t autobind_name;
55215 };
55216
55217 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55218 diff -urNp linux-2.6.39.4/include/net/lapb.h linux-2.6.39.4/include/net/lapb.h
55219 --- linux-2.6.39.4/include/net/lapb.h 2011-05-19 00:06:34.000000000 -0400
55220 +++ linux-2.6.39.4/include/net/lapb.h 2011-08-05 20:34:06.000000000 -0400
55221 @@ -95,7 +95,7 @@ struct lapb_cb {
55222 struct sk_buff_head write_queue;
55223 struct sk_buff_head ack_queue;
55224 unsigned char window;
55225 - struct lapb_register_struct callbacks;
55226 + struct lapb_register_struct *callbacks;
55227
55228 /* FRMR control information */
55229 struct lapb_frame frmr_data;
55230 diff -urNp linux-2.6.39.4/include/net/neighbour.h linux-2.6.39.4/include/net/neighbour.h
55231 --- linux-2.6.39.4/include/net/neighbour.h 2011-05-19 00:06:34.000000000 -0400
55232 +++ linux-2.6.39.4/include/net/neighbour.h 2011-08-05 20:34:06.000000000 -0400
55233 @@ -117,7 +117,7 @@ struct neighbour {
55234 };
55235
55236 struct neigh_ops {
55237 - int family;
55238 + const int family;
55239 void (*solicit)(struct neighbour *, struct sk_buff*);
55240 void (*error_report)(struct neighbour *, struct sk_buff*);
55241 int (*output)(struct sk_buff*);
55242 diff -urNp linux-2.6.39.4/include/net/netlink.h linux-2.6.39.4/include/net/netlink.h
55243 --- linux-2.6.39.4/include/net/netlink.h 2011-05-19 00:06:34.000000000 -0400
55244 +++ linux-2.6.39.4/include/net/netlink.h 2011-08-05 19:44:37.000000000 -0400
55245 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55246 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55247 {
55248 if (mark)
55249 - skb_trim(skb, (unsigned char *) mark - skb->data);
55250 + skb_trim(skb, (const unsigned char *) mark - skb->data);
55251 }
55252
55253 /**
55254 diff -urNp linux-2.6.39.4/include/net/netns/ipv4.h linux-2.6.39.4/include/net/netns/ipv4.h
55255 --- linux-2.6.39.4/include/net/netns/ipv4.h 2011-05-19 00:06:34.000000000 -0400
55256 +++ linux-2.6.39.4/include/net/netns/ipv4.h 2011-08-05 19:44:37.000000000 -0400
55257 @@ -54,8 +54,8 @@ struct netns_ipv4 {
55258 int sysctl_rt_cache_rebuild_count;
55259 int current_rt_cache_rebuild_count;
55260
55261 - atomic_t rt_genid;
55262 - atomic_t dev_addr_genid;
55263 + atomic_unchecked_t rt_genid;
55264 + atomic_unchecked_t dev_addr_genid;
55265
55266 #ifdef CONFIG_IP_MROUTE
55267 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55268 diff -urNp linux-2.6.39.4/include/net/sctp/sctp.h linux-2.6.39.4/include/net/sctp/sctp.h
55269 --- linux-2.6.39.4/include/net/sctp/sctp.h 2011-05-19 00:06:34.000000000 -0400
55270 +++ linux-2.6.39.4/include/net/sctp/sctp.h 2011-08-05 19:44:37.000000000 -0400
55271 @@ -316,9 +316,9 @@ do { \
55272
55273 #else /* SCTP_DEBUG */
55274
55275 -#define SCTP_DEBUG_PRINTK(whatever...)
55276 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55277 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55278 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55279 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55280 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55281 #define SCTP_ENABLE_DEBUG
55282 #define SCTP_DISABLE_DEBUG
55283 #define SCTP_ASSERT(expr, str, func)
55284 diff -urNp linux-2.6.39.4/include/net/sock.h linux-2.6.39.4/include/net/sock.h
55285 --- linux-2.6.39.4/include/net/sock.h 2011-05-19 00:06:34.000000000 -0400
55286 +++ linux-2.6.39.4/include/net/sock.h 2011-08-05 19:44:37.000000000 -0400
55287 @@ -277,7 +277,7 @@ struct sock {
55288 #ifdef CONFIG_RPS
55289 __u32 sk_rxhash;
55290 #endif
55291 - atomic_t sk_drops;
55292 + atomic_unchecked_t sk_drops;
55293 int sk_rcvbuf;
55294
55295 struct sk_filter __rcu *sk_filter;
55296 diff -urNp linux-2.6.39.4/include/net/tcp.h linux-2.6.39.4/include/net/tcp.h
55297 --- linux-2.6.39.4/include/net/tcp.h 2011-05-19 00:06:34.000000000 -0400
55298 +++ linux-2.6.39.4/include/net/tcp.h 2011-08-05 20:34:06.000000000 -0400
55299 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55300 struct tcp_seq_afinfo {
55301 char *name;
55302 sa_family_t family;
55303 - struct file_operations seq_fops;
55304 - struct seq_operations seq_ops;
55305 + file_operations_no_const seq_fops;
55306 + seq_operations_no_const seq_ops;
55307 };
55308
55309 struct tcp_iter_state {
55310 diff -urNp linux-2.6.39.4/include/net/udp.h linux-2.6.39.4/include/net/udp.h
55311 --- linux-2.6.39.4/include/net/udp.h 2011-05-19 00:06:34.000000000 -0400
55312 +++ linux-2.6.39.4/include/net/udp.h 2011-08-05 20:34:06.000000000 -0400
55313 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55314 char *name;
55315 sa_family_t family;
55316 struct udp_table *udp_table;
55317 - struct file_operations seq_fops;
55318 - struct seq_operations seq_ops;
55319 + file_operations_no_const seq_fops;
55320 + seq_operations_no_const seq_ops;
55321 };
55322
55323 struct udp_iter_state {
55324 diff -urNp linux-2.6.39.4/include/net/xfrm.h linux-2.6.39.4/include/net/xfrm.h
55325 --- linux-2.6.39.4/include/net/xfrm.h 2011-05-19 00:06:34.000000000 -0400
55326 +++ linux-2.6.39.4/include/net/xfrm.h 2011-08-05 19:44:37.000000000 -0400
55327 @@ -505,7 +505,7 @@ struct xfrm_policy {
55328 struct timer_list timer;
55329
55330 struct flow_cache_object flo;
55331 - atomic_t genid;
55332 + atomic_unchecked_t genid;
55333 u32 priority;
55334 u32 index;
55335 struct xfrm_mark mark;
55336 diff -urNp linux-2.6.39.4/include/rdma/iw_cm.h linux-2.6.39.4/include/rdma/iw_cm.h
55337 --- linux-2.6.39.4/include/rdma/iw_cm.h 2011-05-19 00:06:34.000000000 -0400
55338 +++ linux-2.6.39.4/include/rdma/iw_cm.h 2011-08-05 20:34:06.000000000 -0400
55339 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
55340 int backlog);
55341
55342 int (*destroy_listen)(struct iw_cm_id *cm_id);
55343 -};
55344 +} __no_const;
55345
55346 /**
55347 * iw_create_cm_id - Create an IW CM identifier.
55348 diff -urNp linux-2.6.39.4/include/scsi/libfc.h linux-2.6.39.4/include/scsi/libfc.h
55349 --- linux-2.6.39.4/include/scsi/libfc.h 2011-05-19 00:06:34.000000000 -0400
55350 +++ linux-2.6.39.4/include/scsi/libfc.h 2011-08-05 20:34:06.000000000 -0400
55351 @@ -750,6 +750,7 @@ struct libfc_function_template {
55352 */
55353 void (*disc_stop_final) (struct fc_lport *);
55354 };
55355 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55356
55357 /**
55358 * struct fc_disc - Discovery context
55359 @@ -853,7 +854,7 @@ struct fc_lport {
55360 struct fc_vport *vport;
55361
55362 /* Operational Information */
55363 - struct libfc_function_template tt;
55364 + libfc_function_template_no_const tt;
55365 u8 link_up;
55366 u8 qfull;
55367 enum fc_lport_state state;
55368 diff -urNp linux-2.6.39.4/include/scsi/scsi_device.h linux-2.6.39.4/include/scsi/scsi_device.h
55369 --- linux-2.6.39.4/include/scsi/scsi_device.h 2011-05-19 00:06:34.000000000 -0400
55370 +++ linux-2.6.39.4/include/scsi/scsi_device.h 2011-08-05 19:44:37.000000000 -0400
55371 @@ -161,9 +161,9 @@ struct scsi_device {
55372 unsigned int max_device_blocked; /* what device_blocked counts down from */
55373 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55374
55375 - atomic_t iorequest_cnt;
55376 - atomic_t iodone_cnt;
55377 - atomic_t ioerr_cnt;
55378 + atomic_unchecked_t iorequest_cnt;
55379 + atomic_unchecked_t iodone_cnt;
55380 + atomic_unchecked_t ioerr_cnt;
55381
55382 struct device sdev_gendev,
55383 sdev_dev;
55384 diff -urNp linux-2.6.39.4/include/scsi/scsi_transport_fc.h linux-2.6.39.4/include/scsi/scsi_transport_fc.h
55385 --- linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-05-19 00:06:34.000000000 -0400
55386 +++ linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-08-05 20:34:06.000000000 -0400
55387 @@ -666,9 +666,9 @@ struct fc_function_template {
55388 int (*bsg_timeout)(struct fc_bsg_job *);
55389
55390 /* allocation lengths for host-specific data */
55391 - u32 dd_fcrport_size;
55392 - u32 dd_fcvport_size;
55393 - u32 dd_bsg_size;
55394 + const u32 dd_fcrport_size;
55395 + const u32 dd_fcvport_size;
55396 + const u32 dd_bsg_size;
55397
55398 /*
55399 * The driver sets these to tell the transport class it
55400 @@ -678,39 +678,39 @@ struct fc_function_template {
55401 */
55402
55403 /* remote port fixed attributes */
55404 - unsigned long show_rport_maxframe_size:1;
55405 - unsigned long show_rport_supported_classes:1;
55406 - unsigned long show_rport_dev_loss_tmo:1;
55407 + const unsigned long show_rport_maxframe_size:1;
55408 + const unsigned long show_rport_supported_classes:1;
55409 + const unsigned long show_rport_dev_loss_tmo:1;
55410
55411 /*
55412 * target dynamic attributes
55413 * These should all be "1" if the driver uses the remote port
55414 * add/delete functions (so attributes reflect rport values).
55415 */
55416 - unsigned long show_starget_node_name:1;
55417 - unsigned long show_starget_port_name:1;
55418 - unsigned long show_starget_port_id:1;
55419 + const unsigned long show_starget_node_name:1;
55420 + const unsigned long show_starget_port_name:1;
55421 + const unsigned long show_starget_port_id:1;
55422
55423 /* host fixed attributes */
55424 - unsigned long show_host_node_name:1;
55425 - unsigned long show_host_port_name:1;
55426 - unsigned long show_host_permanent_port_name:1;
55427 - unsigned long show_host_supported_classes:1;
55428 - unsigned long show_host_supported_fc4s:1;
55429 - unsigned long show_host_supported_speeds:1;
55430 - unsigned long show_host_maxframe_size:1;
55431 - unsigned long show_host_serial_number:1;
55432 + const unsigned long show_host_node_name:1;
55433 + const unsigned long show_host_port_name:1;
55434 + const unsigned long show_host_permanent_port_name:1;
55435 + const unsigned long show_host_supported_classes:1;
55436 + const unsigned long show_host_supported_fc4s:1;
55437 + const unsigned long show_host_supported_speeds:1;
55438 + const unsigned long show_host_maxframe_size:1;
55439 + const unsigned long show_host_serial_number:1;
55440 /* host dynamic attributes */
55441 - unsigned long show_host_port_id:1;
55442 - unsigned long show_host_port_type:1;
55443 - unsigned long show_host_port_state:1;
55444 - unsigned long show_host_active_fc4s:1;
55445 - unsigned long show_host_speed:1;
55446 - unsigned long show_host_fabric_name:1;
55447 - unsigned long show_host_symbolic_name:1;
55448 - unsigned long show_host_system_hostname:1;
55449 + const unsigned long show_host_port_id:1;
55450 + const unsigned long show_host_port_type:1;
55451 + const unsigned long show_host_port_state:1;
55452 + const unsigned long show_host_active_fc4s:1;
55453 + const unsigned long show_host_speed:1;
55454 + const unsigned long show_host_fabric_name:1;
55455 + const unsigned long show_host_symbolic_name:1;
55456 + const unsigned long show_host_system_hostname:1;
55457
55458 - unsigned long disable_target_scan:1;
55459 + const unsigned long disable_target_scan:1;
55460 };
55461
55462
55463 diff -urNp linux-2.6.39.4/include/sound/ak4xxx-adda.h linux-2.6.39.4/include/sound/ak4xxx-adda.h
55464 --- linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-05-19 00:06:34.000000000 -0400
55465 +++ linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-08-05 20:34:06.000000000 -0400
55466 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55467 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55468 unsigned char val);
55469 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55470 -};
55471 +} __no_const;
55472
55473 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55474
55475 diff -urNp linux-2.6.39.4/include/sound/hwdep.h linux-2.6.39.4/include/sound/hwdep.h
55476 --- linux-2.6.39.4/include/sound/hwdep.h 2011-05-19 00:06:34.000000000 -0400
55477 +++ linux-2.6.39.4/include/sound/hwdep.h 2011-08-05 20:34:06.000000000 -0400
55478 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55479 struct snd_hwdep_dsp_status *status);
55480 int (*dsp_load)(struct snd_hwdep *hw,
55481 struct snd_hwdep_dsp_image *image);
55482 -};
55483 +} __no_const;
55484
55485 struct snd_hwdep {
55486 struct snd_card *card;
55487 diff -urNp linux-2.6.39.4/include/sound/info.h linux-2.6.39.4/include/sound/info.h
55488 --- linux-2.6.39.4/include/sound/info.h 2011-05-19 00:06:34.000000000 -0400
55489 +++ linux-2.6.39.4/include/sound/info.h 2011-08-05 20:34:06.000000000 -0400
55490 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55491 struct snd_info_buffer *buffer);
55492 void (*write)(struct snd_info_entry *entry,
55493 struct snd_info_buffer *buffer);
55494 -};
55495 +} __no_const;
55496
55497 struct snd_info_entry_ops {
55498 int (*open)(struct snd_info_entry *entry,
55499 diff -urNp linux-2.6.39.4/include/sound/pcm.h linux-2.6.39.4/include/sound/pcm.h
55500 --- linux-2.6.39.4/include/sound/pcm.h 2011-05-19 00:06:34.000000000 -0400
55501 +++ linux-2.6.39.4/include/sound/pcm.h 2011-08-05 20:34:06.000000000 -0400
55502 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55503 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55504 int (*ack)(struct snd_pcm_substream *substream);
55505 };
55506 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55507
55508 /*
55509 *
55510 diff -urNp linux-2.6.39.4/include/sound/sb16_csp.h linux-2.6.39.4/include/sound/sb16_csp.h
55511 --- linux-2.6.39.4/include/sound/sb16_csp.h 2011-05-19 00:06:34.000000000 -0400
55512 +++ linux-2.6.39.4/include/sound/sb16_csp.h 2011-08-05 20:34:06.000000000 -0400
55513 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
55514 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55515 int (*csp_stop) (struct snd_sb_csp * p);
55516 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55517 -};
55518 +} __no_const;
55519
55520 /*
55521 * CSP private data
55522 diff -urNp linux-2.6.39.4/include/sound/soc.h linux-2.6.39.4/include/sound/soc.h
55523 --- linux-2.6.39.4/include/sound/soc.h 2011-05-19 00:06:34.000000000 -0400
55524 +++ linux-2.6.39.4/include/sound/soc.h 2011-08-05 20:34:06.000000000 -0400
55525 @@ -624,7 +624,7 @@ struct snd_soc_platform_driver {
55526 struct snd_soc_dai *);
55527
55528 /* platform stream ops */
55529 - struct snd_pcm_ops *ops;
55530 + struct snd_pcm_ops * const ops;
55531 };
55532
55533 struct snd_soc_platform {
55534 diff -urNp linux-2.6.39.4/include/sound/ymfpci.h linux-2.6.39.4/include/sound/ymfpci.h
55535 --- linux-2.6.39.4/include/sound/ymfpci.h 2011-05-19 00:06:34.000000000 -0400
55536 +++ linux-2.6.39.4/include/sound/ymfpci.h 2011-08-05 19:44:37.000000000 -0400
55537 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55538 spinlock_t reg_lock;
55539 spinlock_t voice_lock;
55540 wait_queue_head_t interrupt_sleep;
55541 - atomic_t interrupt_sleep_count;
55542 + atomic_unchecked_t interrupt_sleep_count;
55543 struct snd_info_entry *proc_entry;
55544 const struct firmware *dsp_microcode;
55545 const struct firmware *controller_microcode;
55546 diff -urNp linux-2.6.39.4/include/target/target_core_base.h linux-2.6.39.4/include/target/target_core_base.h
55547 --- linux-2.6.39.4/include/target/target_core_base.h 2011-06-03 00:04:14.000000000 -0400
55548 +++ linux-2.6.39.4/include/target/target_core_base.h 2011-08-05 20:34:06.000000000 -0400
55549 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55550 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55551 int (*t10_pr_register)(struct se_cmd *);
55552 int (*t10_pr_clear)(struct se_cmd *);
55553 -};
55554 +} __no_const;
55555
55556 struct t10_reservation_template {
55557 /* Reservation effects all target ports */
55558 @@ -432,8 +432,8 @@ struct se_transport_task {
55559 atomic_t t_task_cdbs_left;
55560 atomic_t t_task_cdbs_ex_left;
55561 atomic_t t_task_cdbs_timeout_left;
55562 - atomic_t t_task_cdbs_sent;
55563 - atomic_t t_transport_aborted;
55564 + atomic_unchecked_t t_task_cdbs_sent;
55565 + atomic_unchecked_t t_transport_aborted;
55566 atomic_t t_transport_active;
55567 atomic_t t_transport_complete;
55568 atomic_t t_transport_queue_active;
55569 @@ -774,7 +774,7 @@ struct se_device {
55570 atomic_t active_cmds;
55571 atomic_t simple_cmds;
55572 atomic_t depth_left;
55573 - atomic_t dev_ordered_id;
55574 + atomic_unchecked_t dev_ordered_id;
55575 atomic_t dev_tur_active;
55576 atomic_t execute_tasks;
55577 atomic_t dev_status_thr_count;
55578 diff -urNp linux-2.6.39.4/include/trace/events/irq.h linux-2.6.39.4/include/trace/events/irq.h
55579 --- linux-2.6.39.4/include/trace/events/irq.h 2011-05-19 00:06:34.000000000 -0400
55580 +++ linux-2.6.39.4/include/trace/events/irq.h 2011-08-05 19:44:37.000000000 -0400
55581 @@ -36,7 +36,7 @@ struct softirq_action;
55582 */
55583 TRACE_EVENT(irq_handler_entry,
55584
55585 - TP_PROTO(int irq, struct irqaction *action),
55586 + TP_PROTO(int irq, const struct irqaction *action),
55587
55588 TP_ARGS(irq, action),
55589
55590 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55591 */
55592 TRACE_EVENT(irq_handler_exit,
55593
55594 - TP_PROTO(int irq, struct irqaction *action, int ret),
55595 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55596
55597 TP_ARGS(irq, action, ret),
55598
55599 diff -urNp linux-2.6.39.4/include/video/udlfb.h linux-2.6.39.4/include/video/udlfb.h
55600 --- linux-2.6.39.4/include/video/udlfb.h 2011-05-19 00:06:34.000000000 -0400
55601 +++ linux-2.6.39.4/include/video/udlfb.h 2011-08-05 19:44:37.000000000 -0400
55602 @@ -51,10 +51,10 @@ struct dlfb_data {
55603 int base8;
55604 u32 pseudo_palette[256];
55605 /* blit-only rendering path metrics, exposed through sysfs */
55606 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55607 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55608 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55609 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55610 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55611 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55612 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55613 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55614 };
55615
55616 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55617 diff -urNp linux-2.6.39.4/include/video/uvesafb.h linux-2.6.39.4/include/video/uvesafb.h
55618 --- linux-2.6.39.4/include/video/uvesafb.h 2011-05-19 00:06:34.000000000 -0400
55619 +++ linux-2.6.39.4/include/video/uvesafb.h 2011-08-05 19:44:37.000000000 -0400
55620 @@ -177,6 +177,7 @@ struct uvesafb_par {
55621 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55622 u8 pmi_setpal; /* PMI for palette changes */
55623 u16 *pmi_base; /* protected mode interface location */
55624 + u8 *pmi_code; /* protected mode code location */
55625 void *pmi_start;
55626 void *pmi_pal;
55627 u8 *vbe_state_orig; /*
55628 diff -urNp linux-2.6.39.4/init/do_mounts.c linux-2.6.39.4/init/do_mounts.c
55629 --- linux-2.6.39.4/init/do_mounts.c 2011-05-19 00:06:34.000000000 -0400
55630 +++ linux-2.6.39.4/init/do_mounts.c 2011-08-05 19:44:37.000000000 -0400
55631 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55632
55633 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55634 {
55635 - int err = sys_mount(name, "/root", fs, flags, data);
55636 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55637 if (err)
55638 return err;
55639
55640 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55641 va_start(args, fmt);
55642 vsprintf(buf, fmt, args);
55643 va_end(args);
55644 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55645 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55646 if (fd >= 0) {
55647 sys_ioctl(fd, FDEJECT, 0);
55648 sys_close(fd);
55649 }
55650 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55651 - fd = sys_open("/dev/console", O_RDWR, 0);
55652 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55653 if (fd >= 0) {
55654 sys_ioctl(fd, TCGETS, (long)&termios);
55655 termios.c_lflag &= ~ICANON;
55656 sys_ioctl(fd, TCSETSF, (long)&termios);
55657 - sys_read(fd, &c, 1);
55658 + sys_read(fd, (char __user *)&c, 1);
55659 termios.c_lflag |= ICANON;
55660 sys_ioctl(fd, TCSETSF, (long)&termios);
55661 sys_close(fd);
55662 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55663 mount_root();
55664 out:
55665 devtmpfs_mount("dev");
55666 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55667 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55668 sys_chroot((const char __user __force *)".");
55669 }
55670 diff -urNp linux-2.6.39.4/init/do_mounts.h linux-2.6.39.4/init/do_mounts.h
55671 --- linux-2.6.39.4/init/do_mounts.h 2011-05-19 00:06:34.000000000 -0400
55672 +++ linux-2.6.39.4/init/do_mounts.h 2011-08-05 19:44:37.000000000 -0400
55673 @@ -15,15 +15,15 @@ extern int root_mountflags;
55674
55675 static inline int create_dev(char *name, dev_t dev)
55676 {
55677 - sys_unlink(name);
55678 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55679 + sys_unlink((__force char __user *)name);
55680 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55681 }
55682
55683 #if BITS_PER_LONG == 32
55684 static inline u32 bstat(char *name)
55685 {
55686 struct stat64 stat;
55687 - if (sys_stat64(name, &stat) != 0)
55688 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55689 return 0;
55690 if (!S_ISBLK(stat.st_mode))
55691 return 0;
55692 diff -urNp linux-2.6.39.4/init/do_mounts_initrd.c linux-2.6.39.4/init/do_mounts_initrd.c
55693 --- linux-2.6.39.4/init/do_mounts_initrd.c 2011-05-19 00:06:34.000000000 -0400
55694 +++ linux-2.6.39.4/init/do_mounts_initrd.c 2011-08-05 19:44:37.000000000 -0400
55695 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55696 create_dev("/dev/root.old", Root_RAM0);
55697 /* mount initrd on rootfs' /root */
55698 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55699 - sys_mkdir("/old", 0700);
55700 - root_fd = sys_open("/", 0, 0);
55701 - old_fd = sys_open("/old", 0, 0);
55702 + sys_mkdir((__force const char __user *)"/old", 0700);
55703 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55704 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55705 /* move initrd over / and chdir/chroot in initrd root */
55706 - sys_chdir("/root");
55707 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55708 - sys_chroot(".");
55709 + sys_chdir((__force const char __user *)"/root");
55710 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55711 + sys_chroot((__force const char __user *)".");
55712
55713 /*
55714 * In case that a resume from disk is carried out by linuxrc or one of
55715 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55716
55717 /* move initrd to rootfs' /old */
55718 sys_fchdir(old_fd);
55719 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55720 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55721 /* switch root and cwd back to / of rootfs */
55722 sys_fchdir(root_fd);
55723 - sys_chroot(".");
55724 + sys_chroot((__force const char __user *)".");
55725 sys_close(old_fd);
55726 sys_close(root_fd);
55727
55728 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55729 - sys_chdir("/old");
55730 + sys_chdir((__force const char __user *)"/old");
55731 return;
55732 }
55733
55734 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55735 mount_root();
55736
55737 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55738 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55739 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55740 if (!error)
55741 printk("okay\n");
55742 else {
55743 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55744 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55745 if (error == -ENOENT)
55746 printk("/initrd does not exist. Ignored.\n");
55747 else
55748 printk("failed\n");
55749 printk(KERN_NOTICE "Unmounting old root\n");
55750 - sys_umount("/old", MNT_DETACH);
55751 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55752 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55753 if (fd < 0) {
55754 error = fd;
55755 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55756 * mounted in the normal path.
55757 */
55758 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55759 - sys_unlink("/initrd.image");
55760 + sys_unlink((__force const char __user *)"/initrd.image");
55761 handle_initrd();
55762 return 1;
55763 }
55764 }
55765 - sys_unlink("/initrd.image");
55766 + sys_unlink((__force const char __user *)"/initrd.image");
55767 return 0;
55768 }
55769 diff -urNp linux-2.6.39.4/init/do_mounts_md.c linux-2.6.39.4/init/do_mounts_md.c
55770 --- linux-2.6.39.4/init/do_mounts_md.c 2011-05-19 00:06:34.000000000 -0400
55771 +++ linux-2.6.39.4/init/do_mounts_md.c 2011-08-05 19:44:37.000000000 -0400
55772 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55773 partitioned ? "_d" : "", minor,
55774 md_setup_args[ent].device_names);
55775
55776 - fd = sys_open(name, 0, 0);
55777 + fd = sys_open((__force char __user *)name, 0, 0);
55778 if (fd < 0) {
55779 printk(KERN_ERR "md: open failed - cannot start "
55780 "array %s\n", name);
55781 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55782 * array without it
55783 */
55784 sys_close(fd);
55785 - fd = sys_open(name, 0, 0);
55786 + fd = sys_open((__force char __user *)name, 0, 0);
55787 sys_ioctl(fd, BLKRRPART, 0);
55788 }
55789 sys_close(fd);
55790 diff -urNp linux-2.6.39.4/init/initramfs.c linux-2.6.39.4/init/initramfs.c
55791 --- linux-2.6.39.4/init/initramfs.c 2011-05-19 00:06:34.000000000 -0400
55792 +++ linux-2.6.39.4/init/initramfs.c 2011-08-05 19:44:37.000000000 -0400
55793 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55794 }
55795 }
55796
55797 -static long __init do_utime(char __user *filename, time_t mtime)
55798 +static long __init do_utime(__force char __user *filename, time_t mtime)
55799 {
55800 struct timespec t[2];
55801
55802 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55803 struct dir_entry *de, *tmp;
55804 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55805 list_del(&de->list);
55806 - do_utime(de->name, de->mtime);
55807 + do_utime((__force char __user *)de->name, de->mtime);
55808 kfree(de->name);
55809 kfree(de);
55810 }
55811 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55812 if (nlink >= 2) {
55813 char *old = find_link(major, minor, ino, mode, collected);
55814 if (old)
55815 - return (sys_link(old, collected) < 0) ? -1 : 1;
55816 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55817 }
55818 return 0;
55819 }
55820 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55821 {
55822 struct stat st;
55823
55824 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55825 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55826 if (S_ISDIR(st.st_mode))
55827 - sys_rmdir(path);
55828 + sys_rmdir((__force char __user *)path);
55829 else
55830 - sys_unlink(path);
55831 + sys_unlink((__force char __user *)path);
55832 }
55833 }
55834
55835 @@ -305,7 +305,7 @@ static int __init do_name(void)
55836 int openflags = O_WRONLY|O_CREAT;
55837 if (ml != 1)
55838 openflags |= O_TRUNC;
55839 - wfd = sys_open(collected, openflags, mode);
55840 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55841
55842 if (wfd >= 0) {
55843 sys_fchown(wfd, uid, gid);
55844 @@ -317,17 +317,17 @@ static int __init do_name(void)
55845 }
55846 }
55847 } else if (S_ISDIR(mode)) {
55848 - sys_mkdir(collected, mode);
55849 - sys_chown(collected, uid, gid);
55850 - sys_chmod(collected, mode);
55851 + sys_mkdir((__force char __user *)collected, mode);
55852 + sys_chown((__force char __user *)collected, uid, gid);
55853 + sys_chmod((__force char __user *)collected, mode);
55854 dir_add(collected, mtime);
55855 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55856 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55857 if (maybe_link() == 0) {
55858 - sys_mknod(collected, mode, rdev);
55859 - sys_chown(collected, uid, gid);
55860 - sys_chmod(collected, mode);
55861 - do_utime(collected, mtime);
55862 + sys_mknod((__force char __user *)collected, mode, rdev);
55863 + sys_chown((__force char __user *)collected, uid, gid);
55864 + sys_chmod((__force char __user *)collected, mode);
55865 + do_utime((__force char __user *)collected, mtime);
55866 }
55867 }
55868 return 0;
55869 @@ -336,15 +336,15 @@ static int __init do_name(void)
55870 static int __init do_copy(void)
55871 {
55872 if (count >= body_len) {
55873 - sys_write(wfd, victim, body_len);
55874 + sys_write(wfd, (__force char __user *)victim, body_len);
55875 sys_close(wfd);
55876 - do_utime(vcollected, mtime);
55877 + do_utime((__force char __user *)vcollected, mtime);
55878 kfree(vcollected);
55879 eat(body_len);
55880 state = SkipIt;
55881 return 0;
55882 } else {
55883 - sys_write(wfd, victim, count);
55884 + sys_write(wfd, (__force char __user *)victim, count);
55885 body_len -= count;
55886 eat(count);
55887 return 1;
55888 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55889 {
55890 collected[N_ALIGN(name_len) + body_len] = '\0';
55891 clean_path(collected, 0);
55892 - sys_symlink(collected + N_ALIGN(name_len), collected);
55893 - sys_lchown(collected, uid, gid);
55894 - do_utime(collected, mtime);
55895 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55896 + sys_lchown((__force char __user *)collected, uid, gid);
55897 + do_utime((__force char __user *)collected, mtime);
55898 state = SkipIt;
55899 next_state = Reset;
55900 return 0;
55901 diff -urNp linux-2.6.39.4/init/Kconfig linux-2.6.39.4/init/Kconfig
55902 --- linux-2.6.39.4/init/Kconfig 2011-05-19 00:06:34.000000000 -0400
55903 +++ linux-2.6.39.4/init/Kconfig 2011-08-05 19:44:37.000000000 -0400
55904 @@ -1202,7 +1202,7 @@ config SLUB_DEBUG
55905
55906 config COMPAT_BRK
55907 bool "Disable heap randomization"
55908 - default y
55909 + default n
55910 help
55911 Randomizing heap placement makes heap exploits harder, but it
55912 also breaks ancient binaries (including anything libc5 based).
55913 diff -urNp linux-2.6.39.4/init/main.c linux-2.6.39.4/init/main.c
55914 --- linux-2.6.39.4/init/main.c 2011-06-03 00:04:14.000000000 -0400
55915 +++ linux-2.6.39.4/init/main.c 2011-08-05 20:34:06.000000000 -0400
55916 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
55917 extern void tc_init(void);
55918 #endif
55919
55920 +extern void grsecurity_init(void);
55921 +
55922 /*
55923 * Debug helper: via this flag we know that we are in 'early bootup code'
55924 * where only the boot processor is running with IRQ disabled. This means
55925 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
55926
55927 __setup("reset_devices", set_reset_devices);
55928
55929 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
55930 +extern char pax_enter_kernel_user[];
55931 +extern char pax_exit_kernel_user[];
55932 +extern pgdval_t clone_pgd_mask;
55933 +#endif
55934 +
55935 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
55936 +static int __init setup_pax_nouderef(char *str)
55937 +{
55938 +#ifdef CONFIG_X86_32
55939 + unsigned int cpu;
55940 + struct desc_struct *gdt;
55941 +
55942 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
55943 + gdt = get_cpu_gdt_table(cpu);
55944 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
55945 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
55946 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
55947 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
55948 + }
55949 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
55950 +#else
55951 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
55952 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
55953 + clone_pgd_mask = ~(pgdval_t)0UL;
55954 +#endif
55955 +
55956 + return 0;
55957 +}
55958 +early_param("pax_nouderef", setup_pax_nouderef);
55959 +#endif
55960 +
55961 +#ifdef CONFIG_PAX_SOFTMODE
55962 +int pax_softmode;
55963 +
55964 +static int __init setup_pax_softmode(char *str)
55965 +{
55966 + get_option(&str, &pax_softmode);
55967 + return 1;
55968 +}
55969 +__setup("pax_softmode=", setup_pax_softmode);
55970 +#endif
55971 +
55972 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
55973 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
55974 static const char *panic_later, *panic_param;
55975 @@ -663,6 +708,7 @@ int __init_or_module do_one_initcall(ini
55976 {
55977 int count = preempt_count();
55978 int ret;
55979 + const char *msg1 = "", *msg2 = "";
55980
55981 if (initcall_debug)
55982 ret = do_one_initcall_debug(fn);
55983 @@ -675,15 +721,15 @@ int __init_or_module do_one_initcall(ini
55984 sprintf(msgbuf, "error code %d ", ret);
55985
55986 if (preempt_count() != count) {
55987 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
55988 + msg1 = " preemption imbalance";
55989 preempt_count() = count;
55990 }
55991 if (irqs_disabled()) {
55992 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
55993 + msg2 = " disabled interrupts";
55994 local_irq_enable();
55995 }
55996 - if (msgbuf[0]) {
55997 - printk("initcall %pF returned with %s\n", fn, msgbuf);
55998 + if (msgbuf[0] || *msg1 || *msg2) {
55999 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
56000 }
56001
56002 return ret;
56003 @@ -801,7 +847,7 @@ static int __init kernel_init(void * unu
56004 do_basic_setup();
56005
56006 /* Open the /dev/console on the rootfs, this should never fail */
56007 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
56008 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
56009 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
56010
56011 (void) sys_dup(0);
56012 @@ -814,11 +860,13 @@ static int __init kernel_init(void * unu
56013 if (!ramdisk_execute_command)
56014 ramdisk_execute_command = "/init";
56015
56016 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
56017 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
56018 ramdisk_execute_command = NULL;
56019 prepare_namespace();
56020 }
56021
56022 + grsecurity_init();
56023 +
56024 /*
56025 * Ok, we have completed the initial bootup, and
56026 * we're essentially up and running. Get rid of the
56027 diff -urNp linux-2.6.39.4/ipc/mqueue.c linux-2.6.39.4/ipc/mqueue.c
56028 --- linux-2.6.39.4/ipc/mqueue.c 2011-05-19 00:06:34.000000000 -0400
56029 +++ linux-2.6.39.4/ipc/mqueue.c 2011-08-05 19:44:37.000000000 -0400
56030 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
56031 mq_bytes = (mq_msg_tblsz +
56032 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
56033
56034 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
56035 spin_lock(&mq_lock);
56036 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
56037 u->mq_bytes + mq_bytes >
56038 diff -urNp linux-2.6.39.4/ipc/msg.c linux-2.6.39.4/ipc/msg.c
56039 --- linux-2.6.39.4/ipc/msg.c 2011-05-19 00:06:34.000000000 -0400
56040 +++ linux-2.6.39.4/ipc/msg.c 2011-08-05 20:34:06.000000000 -0400
56041 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
56042 return security_msg_queue_associate(msq, msgflg);
56043 }
56044
56045 +static struct ipc_ops msg_ops = {
56046 + .getnew = newque,
56047 + .associate = msg_security,
56048 + .more_checks = NULL
56049 +};
56050 +
56051 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
56052 {
56053 struct ipc_namespace *ns;
56054 - struct ipc_ops msg_ops;
56055 struct ipc_params msg_params;
56056
56057 ns = current->nsproxy->ipc_ns;
56058
56059 - msg_ops.getnew = newque;
56060 - msg_ops.associate = msg_security;
56061 - msg_ops.more_checks = NULL;
56062 -
56063 msg_params.key = key;
56064 msg_params.flg = msgflg;
56065
56066 diff -urNp linux-2.6.39.4/ipc/sem.c linux-2.6.39.4/ipc/sem.c
56067 --- linux-2.6.39.4/ipc/sem.c 2011-05-19 00:06:34.000000000 -0400
56068 +++ linux-2.6.39.4/ipc/sem.c 2011-08-05 20:34:06.000000000 -0400
56069 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
56070 return 0;
56071 }
56072
56073 +static struct ipc_ops sem_ops = {
56074 + .getnew = newary,
56075 + .associate = sem_security,
56076 + .more_checks = sem_more_checks
56077 +};
56078 +
56079 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
56080 {
56081 struct ipc_namespace *ns;
56082 - struct ipc_ops sem_ops;
56083 struct ipc_params sem_params;
56084
56085 ns = current->nsproxy->ipc_ns;
56086 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
56087 if (nsems < 0 || nsems > ns->sc_semmsl)
56088 return -EINVAL;
56089
56090 - sem_ops.getnew = newary;
56091 - sem_ops.associate = sem_security;
56092 - sem_ops.more_checks = sem_more_checks;
56093 -
56094 sem_params.key = key;
56095 sem_params.flg = semflg;
56096 sem_params.u.nsems = nsems;
56097 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
56098 int nsems;
56099 struct list_head tasks;
56100
56101 + pax_track_stack();
56102 +
56103 sma = sem_lock_check(ns, semid);
56104 if (IS_ERR(sma))
56105 return PTR_ERR(sma);
56106 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
56107 struct ipc_namespace *ns;
56108 struct list_head tasks;
56109
56110 + pax_track_stack();
56111 +
56112 ns = current->nsproxy->ipc_ns;
56113
56114 if (nsops < 1 || semid < 0)
56115 diff -urNp linux-2.6.39.4/ipc/shm.c linux-2.6.39.4/ipc/shm.c
56116 --- linux-2.6.39.4/ipc/shm.c 2011-05-19 00:06:34.000000000 -0400
56117 +++ linux-2.6.39.4/ipc/shm.c 2011-08-05 20:34:06.000000000 -0400
56118 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
56119 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
56120 #endif
56121
56122 +#ifdef CONFIG_GRKERNSEC
56123 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56124 + const time_t shm_createtime, const uid_t cuid,
56125 + const int shmid);
56126 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56127 + const time_t shm_createtime);
56128 +#endif
56129 +
56130 void shm_init_ns(struct ipc_namespace *ns)
56131 {
56132 ns->shm_ctlmax = SHMMAX;
56133 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
56134 shp->shm_lprid = 0;
56135 shp->shm_atim = shp->shm_dtim = 0;
56136 shp->shm_ctim = get_seconds();
56137 +#ifdef CONFIG_GRKERNSEC
56138 + {
56139 + struct timespec timeval;
56140 + do_posix_clock_monotonic_gettime(&timeval);
56141 +
56142 + shp->shm_createtime = timeval.tv_sec;
56143 + }
56144 +#endif
56145 shp->shm_segsz = size;
56146 shp->shm_nattch = 0;
56147 shp->shm_file = file;
56148 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
56149 return 0;
56150 }
56151
56152 +static struct ipc_ops shm_ops = {
56153 + .getnew = newseg,
56154 + .associate = shm_security,
56155 + .more_checks = shm_more_checks
56156 +};
56157 +
56158 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
56159 {
56160 struct ipc_namespace *ns;
56161 - struct ipc_ops shm_ops;
56162 struct ipc_params shm_params;
56163
56164 ns = current->nsproxy->ipc_ns;
56165
56166 - shm_ops.getnew = newseg;
56167 - shm_ops.associate = shm_security;
56168 - shm_ops.more_checks = shm_more_checks;
56169 -
56170 shm_params.key = key;
56171 shm_params.flg = shmflg;
56172 shm_params.u.size = size;
56173 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
56174 case SHM_LOCK:
56175 case SHM_UNLOCK:
56176 {
56177 - struct file *uninitialized_var(shm_file);
56178 -
56179 lru_add_drain_all(); /* drain pagevecs to lru lists */
56180
56181 shp = shm_lock_check(ns, shmid);
56182 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56183 if (err)
56184 goto out_unlock;
56185
56186 +#ifdef CONFIG_GRKERNSEC
56187 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56188 + shp->shm_perm.cuid, shmid) ||
56189 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56190 + err = -EACCES;
56191 + goto out_unlock;
56192 + }
56193 +#endif
56194 +
56195 path = shp->shm_file->f_path;
56196 path_get(&path);
56197 shp->shm_nattch++;
56198 +#ifdef CONFIG_GRKERNSEC
56199 + shp->shm_lapid = current->pid;
56200 +#endif
56201 size = i_size_read(path.dentry->d_inode);
56202 shm_unlock(shp);
56203
56204 diff -urNp linux-2.6.39.4/kernel/acct.c linux-2.6.39.4/kernel/acct.c
56205 --- linux-2.6.39.4/kernel/acct.c 2011-05-19 00:06:34.000000000 -0400
56206 +++ linux-2.6.39.4/kernel/acct.c 2011-08-05 19:44:37.000000000 -0400
56207 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56208 */
56209 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56210 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56211 - file->f_op->write(file, (char *)&ac,
56212 + file->f_op->write(file, (__force char __user *)&ac,
56213 sizeof(acct_t), &file->f_pos);
56214 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56215 set_fs(fs);
56216 diff -urNp linux-2.6.39.4/kernel/audit.c linux-2.6.39.4/kernel/audit.c
56217 --- linux-2.6.39.4/kernel/audit.c 2011-05-19 00:06:34.000000000 -0400
56218 +++ linux-2.6.39.4/kernel/audit.c 2011-08-05 19:44:37.000000000 -0400
56219 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56220 3) suppressed due to audit_rate_limit
56221 4) suppressed due to audit_backlog_limit
56222 */
56223 -static atomic_t audit_lost = ATOMIC_INIT(0);
56224 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56225
56226 /* The netlink socket. */
56227 static struct sock *audit_sock;
56228 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56229 unsigned long now;
56230 int print;
56231
56232 - atomic_inc(&audit_lost);
56233 + atomic_inc_unchecked(&audit_lost);
56234
56235 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56236
56237 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56238 printk(KERN_WARNING
56239 "audit: audit_lost=%d audit_rate_limit=%d "
56240 "audit_backlog_limit=%d\n",
56241 - atomic_read(&audit_lost),
56242 + atomic_read_unchecked(&audit_lost),
56243 audit_rate_limit,
56244 audit_backlog_limit);
56245 audit_panic(message);
56246 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56247 status_set.pid = audit_pid;
56248 status_set.rate_limit = audit_rate_limit;
56249 status_set.backlog_limit = audit_backlog_limit;
56250 - status_set.lost = atomic_read(&audit_lost);
56251 + status_set.lost = atomic_read_unchecked(&audit_lost);
56252 status_set.backlog = skb_queue_len(&audit_skb_queue);
56253 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56254 &status_set, sizeof(status_set));
56255 diff -urNp linux-2.6.39.4/kernel/auditsc.c linux-2.6.39.4/kernel/auditsc.c
56256 --- linux-2.6.39.4/kernel/auditsc.c 2011-05-19 00:06:34.000000000 -0400
56257 +++ linux-2.6.39.4/kernel/auditsc.c 2011-08-05 19:44:37.000000000 -0400
56258 @@ -2111,7 +2111,7 @@ int auditsc_get_stamp(struct audit_conte
56259 }
56260
56261 /* global counter which is incremented every time something logs in */
56262 -static atomic_t session_id = ATOMIC_INIT(0);
56263 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56264
56265 /**
56266 * audit_set_loginuid - set a task's audit_context loginuid
56267 @@ -2124,7 +2124,7 @@ static atomic_t session_id = ATOMIC_INIT
56268 */
56269 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56270 {
56271 - unsigned int sessionid = atomic_inc_return(&session_id);
56272 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56273 struct audit_context *context = task->audit_context;
56274
56275 if (context && context->in_syscall) {
56276 diff -urNp linux-2.6.39.4/kernel/capability.c linux-2.6.39.4/kernel/capability.c
56277 --- linux-2.6.39.4/kernel/capability.c 2011-05-19 00:06:34.000000000 -0400
56278 +++ linux-2.6.39.4/kernel/capability.c 2011-08-05 19:44:37.000000000 -0400
56279 @@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56280 * before modification is attempted and the application
56281 * fails.
56282 */
56283 + if (tocopy > ARRAY_SIZE(kdata))
56284 + return -EFAULT;
56285 +
56286 if (copy_to_user(dataptr, kdata, tocopy
56287 * sizeof(struct __user_cap_data_struct))) {
56288 return -EFAULT;
56289 @@ -378,7 +381,7 @@ bool ns_capable(struct user_namespace *n
56290 BUG();
56291 }
56292
56293 - if (security_capable(ns, current_cred(), cap) == 0) {
56294 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56295 current->flags |= PF_SUPERPRIV;
56296 return true;
56297 }
56298 @@ -386,6 +389,27 @@ bool ns_capable(struct user_namespace *n
56299 }
56300 EXPORT_SYMBOL(ns_capable);
56301
56302 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
56303 +{
56304 + if (unlikely(!cap_valid(cap))) {
56305 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56306 + BUG();
56307 + }
56308 +
56309 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56310 + current->flags |= PF_SUPERPRIV;
56311 + return true;
56312 + }
56313 + return false;
56314 +}
56315 +EXPORT_SYMBOL(ns_capable_nolog);
56316 +
56317 +bool capable_nolog(int cap)
56318 +{
56319 + return ns_capable_nolog(&init_user_ns, cap);
56320 +}
56321 +EXPORT_SYMBOL(capable_nolog);
56322 +
56323 /**
56324 * task_ns_capable - Determine whether current task has a superior
56325 * capability targeted at a specific task's user namespace.
56326 @@ -400,6 +424,12 @@ bool task_ns_capable(struct task_struct
56327 }
56328 EXPORT_SYMBOL(task_ns_capable);
56329
56330 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
56331 +{
56332 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56333 +}
56334 +EXPORT_SYMBOL(task_ns_capable_nolog);
56335 +
56336 /**
56337 * nsown_capable - Check superior capability to one's own user_ns
56338 * @cap: The capability in question
56339 diff -urNp linux-2.6.39.4/kernel/cgroup.c linux-2.6.39.4/kernel/cgroup.c
56340 --- linux-2.6.39.4/kernel/cgroup.c 2011-05-19 00:06:34.000000000 -0400
56341 +++ linux-2.6.39.4/kernel/cgroup.c 2011-08-05 19:44:37.000000000 -0400
56342 @@ -598,6 +598,8 @@ static struct css_set *find_css_set(
56343 struct hlist_head *hhead;
56344 struct cg_cgroup_link *link;
56345
56346 + pax_track_stack();
56347 +
56348 /* First see if we already have a cgroup group that matches
56349 * the desired set */
56350 read_lock(&css_set_lock);
56351 diff -urNp linux-2.6.39.4/kernel/compat.c linux-2.6.39.4/kernel/compat.c
56352 --- linux-2.6.39.4/kernel/compat.c 2011-05-19 00:06:34.000000000 -0400
56353 +++ linux-2.6.39.4/kernel/compat.c 2011-08-05 19:44:37.000000000 -0400
56354 @@ -13,6 +13,7 @@
56355
56356 #include <linux/linkage.h>
56357 #include <linux/compat.h>
56358 +#include <linux/module.h>
56359 #include <linux/errno.h>
56360 #include <linux/time.h>
56361 #include <linux/signal.h>
56362 diff -urNp linux-2.6.39.4/kernel/configs.c linux-2.6.39.4/kernel/configs.c
56363 --- linux-2.6.39.4/kernel/configs.c 2011-05-19 00:06:34.000000000 -0400
56364 +++ linux-2.6.39.4/kernel/configs.c 2011-08-05 19:44:37.000000000 -0400
56365 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56366 struct proc_dir_entry *entry;
56367
56368 /* create the current config file */
56369 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56370 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56371 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56372 + &ikconfig_file_ops);
56373 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56374 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56375 + &ikconfig_file_ops);
56376 +#endif
56377 +#else
56378 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56379 &ikconfig_file_ops);
56380 +#endif
56381 +
56382 if (!entry)
56383 return -ENOMEM;
56384
56385 diff -urNp linux-2.6.39.4/kernel/cred.c linux-2.6.39.4/kernel/cred.c
56386 --- linux-2.6.39.4/kernel/cred.c 2011-05-19 00:06:34.000000000 -0400
56387 +++ linux-2.6.39.4/kernel/cred.c 2011-08-05 19:44:37.000000000 -0400
56388 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56389 */
56390 void __put_cred(struct cred *cred)
56391 {
56392 + pax_track_stack();
56393 +
56394 kdebug("__put_cred(%p{%d,%d})", cred,
56395 atomic_read(&cred->usage),
56396 read_cred_subscribers(cred));
56397 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56398 {
56399 struct cred *cred;
56400
56401 + pax_track_stack();
56402 +
56403 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56404 atomic_read(&tsk->cred->usage),
56405 read_cred_subscribers(tsk->cred));
56406 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56407 {
56408 const struct cred *cred;
56409
56410 + pax_track_stack();
56411 +
56412 rcu_read_lock();
56413
56414 do {
56415 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56416 {
56417 struct cred *new;
56418
56419 + pax_track_stack();
56420 +
56421 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56422 if (!new)
56423 return NULL;
56424 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56425 const struct cred *old;
56426 struct cred *new;
56427
56428 + pax_track_stack();
56429 +
56430 validate_process_creds();
56431
56432 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56433 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56434 struct thread_group_cred *tgcred = NULL;
56435 struct cred *new;
56436
56437 + pax_track_stack();
56438 +
56439 #ifdef CONFIG_KEYS
56440 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56441 if (!tgcred)
56442 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56443 struct cred *new;
56444 int ret;
56445
56446 + pax_track_stack();
56447 +
56448 if (
56449 #ifdef CONFIG_KEYS
56450 !p->cred->thread_keyring &&
56451 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56452 struct task_struct *task = current;
56453 const struct cred *old = task->real_cred;
56454
56455 + pax_track_stack();
56456 +
56457 kdebug("commit_creds(%p{%d,%d})", new,
56458 atomic_read(&new->usage),
56459 read_cred_subscribers(new));
56460 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56461
56462 get_cred(new); /* we will require a ref for the subj creds too */
56463
56464 + gr_set_role_label(task, new->uid, new->gid);
56465 +
56466 /* dumpability changes */
56467 if (old->euid != new->euid ||
56468 old->egid != new->egid ||
56469 @@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56470 */
56471 void abort_creds(struct cred *new)
56472 {
56473 + pax_track_stack();
56474 +
56475 kdebug("abort_creds(%p{%d,%d})", new,
56476 atomic_read(&new->usage),
56477 read_cred_subscribers(new));
56478 @@ -574,6 +594,8 @@ const struct cred *override_creds(const
56479 {
56480 const struct cred *old = current->cred;
56481
56482 + pax_track_stack();
56483 +
56484 kdebug("override_creds(%p{%d,%d})", new,
56485 atomic_read(&new->usage),
56486 read_cred_subscribers(new));
56487 @@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56488 {
56489 const struct cred *override = current->cred;
56490
56491 + pax_track_stack();
56492 +
56493 kdebug("revert_creds(%p{%d,%d})", old,
56494 atomic_read(&old->usage),
56495 read_cred_subscribers(old));
56496 @@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56497 const struct cred *old;
56498 struct cred *new;
56499
56500 + pax_track_stack();
56501 +
56502 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56503 if (!new)
56504 return NULL;
56505 @@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56506 */
56507 int set_security_override(struct cred *new, u32 secid)
56508 {
56509 + pax_track_stack();
56510 +
56511 return security_kernel_act_as(new, secid);
56512 }
56513 EXPORT_SYMBOL(set_security_override);
56514 @@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56515 u32 secid;
56516 int ret;
56517
56518 + pax_track_stack();
56519 +
56520 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56521 if (ret < 0)
56522 return ret;
56523 diff -urNp linux-2.6.39.4/kernel/debug/debug_core.c linux-2.6.39.4/kernel/debug/debug_core.c
56524 --- linux-2.6.39.4/kernel/debug/debug_core.c 2011-05-19 00:06:34.000000000 -0400
56525 +++ linux-2.6.39.4/kernel/debug/debug_core.c 2011-08-05 20:34:06.000000000 -0400
56526 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56527 */
56528 static atomic_t masters_in_kgdb;
56529 static atomic_t slaves_in_kgdb;
56530 -static atomic_t kgdb_break_tasklet_var;
56531 +static atomic_unchecked_t kgdb_break_tasklet_var;
56532 atomic_t kgdb_setting_breakpoint;
56533
56534 struct task_struct *kgdb_usethread;
56535 @@ -129,7 +129,7 @@ int kgdb_single_step;
56536 static pid_t kgdb_sstep_pid;
56537
56538 /* to keep track of the CPU which is doing the single stepping*/
56539 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56540 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56541
56542 /*
56543 * If you are debugging a problem where roundup (the collection of
56544 @@ -542,7 +542,7 @@ return_normal:
56545 * kernel will only try for the value of sstep_tries before
56546 * giving up and continuing on.
56547 */
56548 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56549 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56550 (kgdb_info[cpu].task &&
56551 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56552 atomic_set(&kgdb_active, -1);
56553 @@ -636,8 +636,8 @@ cpu_master_loop:
56554 }
56555
56556 kgdb_restore:
56557 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56558 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56559 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56560 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56561 if (kgdb_info[sstep_cpu].task)
56562 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56563 else
56564 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56565 static void kgdb_tasklet_bpt(unsigned long ing)
56566 {
56567 kgdb_breakpoint();
56568 - atomic_set(&kgdb_break_tasklet_var, 0);
56569 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56570 }
56571
56572 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56573
56574 void kgdb_schedule_breakpoint(void)
56575 {
56576 - if (atomic_read(&kgdb_break_tasklet_var) ||
56577 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56578 atomic_read(&kgdb_active) != -1 ||
56579 atomic_read(&kgdb_setting_breakpoint))
56580 return;
56581 - atomic_inc(&kgdb_break_tasklet_var);
56582 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56583 tasklet_schedule(&kgdb_tasklet_breakpoint);
56584 }
56585 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56586 diff -urNp linux-2.6.39.4/kernel/debug/kdb/kdb_main.c linux-2.6.39.4/kernel/debug/kdb/kdb_main.c
56587 --- linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-05-19 00:06:34.000000000 -0400
56588 +++ linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-08-05 19:44:37.000000000 -0400
56589 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56590 list_for_each_entry(mod, kdb_modules, list) {
56591
56592 kdb_printf("%-20s%8u 0x%p ", mod->name,
56593 - mod->core_size, (void *)mod);
56594 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56595 #ifdef CONFIG_MODULE_UNLOAD
56596 kdb_printf("%4d ", module_refcount(mod));
56597 #endif
56598 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56599 kdb_printf(" (Loading)");
56600 else
56601 kdb_printf(" (Live)");
56602 - kdb_printf(" 0x%p", mod->module_core);
56603 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56604
56605 #ifdef CONFIG_MODULE_UNLOAD
56606 {
56607 diff -urNp linux-2.6.39.4/kernel/exit.c linux-2.6.39.4/kernel/exit.c
56608 --- linux-2.6.39.4/kernel/exit.c 2011-05-19 00:06:34.000000000 -0400
56609 +++ linux-2.6.39.4/kernel/exit.c 2011-08-05 19:44:37.000000000 -0400
56610 @@ -57,6 +57,10 @@
56611 #include <asm/pgtable.h>
56612 #include <asm/mmu_context.h>
56613
56614 +#ifdef CONFIG_GRKERNSEC
56615 +extern rwlock_t grsec_exec_file_lock;
56616 +#endif
56617 +
56618 static void exit_mm(struct task_struct * tsk);
56619
56620 static void __unhash_process(struct task_struct *p, bool group_dead)
56621 @@ -169,6 +173,8 @@ void release_task(struct task_struct * p
56622 struct task_struct *leader;
56623 int zap_leader;
56624 repeat:
56625 + gr_del_task_from_ip_table(p);
56626 +
56627 tracehook_prepare_release_task(p);
56628 /* don't need to get the RCU readlock here - the process is dead and
56629 * can't be modifying its own credentials. But shut RCU-lockdep up */
56630 @@ -338,11 +344,22 @@ static void reparent_to_kthreadd(void)
56631 {
56632 write_lock_irq(&tasklist_lock);
56633
56634 +#ifdef CONFIG_GRKERNSEC
56635 + write_lock(&grsec_exec_file_lock);
56636 + if (current->exec_file) {
56637 + fput(current->exec_file);
56638 + current->exec_file = NULL;
56639 + }
56640 + write_unlock(&grsec_exec_file_lock);
56641 +#endif
56642 +
56643 ptrace_unlink(current);
56644 /* Reparent to init */
56645 current->real_parent = current->parent = kthreadd_task;
56646 list_move_tail(&current->sibling, &current->real_parent->children);
56647
56648 + gr_set_kernel_label(current);
56649 +
56650 /* Set the exit signal to SIGCHLD so we signal init on exit */
56651 current->exit_signal = SIGCHLD;
56652
56653 @@ -394,7 +411,7 @@ int allow_signal(int sig)
56654 * know it'll be handled, so that they don't get converted to
56655 * SIGKILL or just silently dropped.
56656 */
56657 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56658 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56659 recalc_sigpending();
56660 spin_unlock_irq(&current->sighand->siglock);
56661 return 0;
56662 @@ -430,6 +447,17 @@ void daemonize(const char *name, ...)
56663 vsnprintf(current->comm, sizeof(current->comm), name, args);
56664 va_end(args);
56665
56666 +#ifdef CONFIG_GRKERNSEC
56667 + write_lock(&grsec_exec_file_lock);
56668 + if (current->exec_file) {
56669 + fput(current->exec_file);
56670 + current->exec_file = NULL;
56671 + }
56672 + write_unlock(&grsec_exec_file_lock);
56673 +#endif
56674 +
56675 + gr_set_kernel_label(current);
56676 +
56677 /*
56678 * If we were started as result of loading a module, close all of the
56679 * user space pages. We don't need them, and if we didn't close them
56680 @@ -905,15 +933,8 @@ NORET_TYPE void do_exit(long code)
56681 struct task_struct *tsk = current;
56682 int group_dead;
56683
56684 - profile_task_exit(tsk);
56685 -
56686 - WARN_ON(atomic_read(&tsk->fs_excl));
56687 - WARN_ON(blk_needs_flush_plug(tsk));
56688 -
56689 if (unlikely(in_interrupt()))
56690 panic("Aiee, killing interrupt handler!");
56691 - if (unlikely(!tsk->pid))
56692 - panic("Attempted to kill the idle task!");
56693
56694 /*
56695 * If do_exit is called because this processes oopsed, it's possible
56696 @@ -924,6 +945,14 @@ NORET_TYPE void do_exit(long code)
56697 */
56698 set_fs(USER_DS);
56699
56700 + profile_task_exit(tsk);
56701 +
56702 + WARN_ON(atomic_read(&tsk->fs_excl));
56703 + WARN_ON(blk_needs_flush_plug(tsk));
56704 +
56705 + if (unlikely(!tsk->pid))
56706 + panic("Attempted to kill the idle task!");
56707 +
56708 tracehook_report_exit(&code);
56709
56710 validate_creds_for_do_exit(tsk);
56711 @@ -984,6 +1013,9 @@ NORET_TYPE void do_exit(long code)
56712 tsk->exit_code = code;
56713 taskstats_exit(tsk, group_dead);
56714
56715 + gr_acl_handle_psacct(tsk, code);
56716 + gr_acl_handle_exit();
56717 +
56718 exit_mm(tsk);
56719
56720 if (group_dead)
56721 diff -urNp linux-2.6.39.4/kernel/fork.c linux-2.6.39.4/kernel/fork.c
56722 --- linux-2.6.39.4/kernel/fork.c 2011-05-19 00:06:34.000000000 -0400
56723 +++ linux-2.6.39.4/kernel/fork.c 2011-08-05 19:44:37.000000000 -0400
56724 @@ -287,7 +287,7 @@ static struct task_struct *dup_task_stru
56725 *stackend = STACK_END_MAGIC; /* for overflow detection */
56726
56727 #ifdef CONFIG_CC_STACKPROTECTOR
56728 - tsk->stack_canary = get_random_int();
56729 + tsk->stack_canary = pax_get_random_long();
56730 #endif
56731
56732 /* One for us, one for whoever does the "release_task()" (usually parent) */
56733 @@ -309,13 +309,78 @@ out:
56734 }
56735
56736 #ifdef CONFIG_MMU
56737 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56738 +{
56739 + struct vm_area_struct *tmp;
56740 + unsigned long charge;
56741 + struct mempolicy *pol;
56742 + struct file *file;
56743 +
56744 + charge = 0;
56745 + if (mpnt->vm_flags & VM_ACCOUNT) {
56746 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56747 + if (security_vm_enough_memory(len))
56748 + goto fail_nomem;
56749 + charge = len;
56750 + }
56751 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56752 + if (!tmp)
56753 + goto fail_nomem;
56754 + *tmp = *mpnt;
56755 + tmp->vm_mm = mm;
56756 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56757 + pol = mpol_dup(vma_policy(mpnt));
56758 + if (IS_ERR(pol))
56759 + goto fail_nomem_policy;
56760 + vma_set_policy(tmp, pol);
56761 + if (anon_vma_fork(tmp, mpnt))
56762 + goto fail_nomem_anon_vma_fork;
56763 + tmp->vm_flags &= ~VM_LOCKED;
56764 + tmp->vm_next = tmp->vm_prev = NULL;
56765 + tmp->vm_mirror = NULL;
56766 + file = tmp->vm_file;
56767 + if (file) {
56768 + struct inode *inode = file->f_path.dentry->d_inode;
56769 + struct address_space *mapping = file->f_mapping;
56770 +
56771 + get_file(file);
56772 + if (tmp->vm_flags & VM_DENYWRITE)
56773 + atomic_dec(&inode->i_writecount);
56774 + spin_lock(&mapping->i_mmap_lock);
56775 + if (tmp->vm_flags & VM_SHARED)
56776 + mapping->i_mmap_writable++;
56777 + tmp->vm_truncate_count = mpnt->vm_truncate_count;
56778 + flush_dcache_mmap_lock(mapping);
56779 + /* insert tmp into the share list, just after mpnt */
56780 + vma_prio_tree_add(tmp, mpnt);
56781 + flush_dcache_mmap_unlock(mapping);
56782 + spin_unlock(&mapping->i_mmap_lock);
56783 + }
56784 +
56785 + /*
56786 + * Clear hugetlb-related page reserves for children. This only
56787 + * affects MAP_PRIVATE mappings. Faults generated by the child
56788 + * are not guaranteed to succeed, even if read-only
56789 + */
56790 + if (is_vm_hugetlb_page(tmp))
56791 + reset_vma_resv_huge_pages(tmp);
56792 +
56793 + return tmp;
56794 +
56795 +fail_nomem_anon_vma_fork:
56796 + mpol_put(pol);
56797 +fail_nomem_policy:
56798 + kmem_cache_free(vm_area_cachep, tmp);
56799 +fail_nomem:
56800 + vm_unacct_memory(charge);
56801 + return NULL;
56802 +}
56803 +
56804 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56805 {
56806 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56807 struct rb_node **rb_link, *rb_parent;
56808 int retval;
56809 - unsigned long charge;
56810 - struct mempolicy *pol;
56811
56812 down_write(&oldmm->mmap_sem);
56813 flush_cache_dup_mm(oldmm);
56814 @@ -327,8 +392,8 @@ static int dup_mmap(struct mm_struct *mm
56815 mm->locked_vm = 0;
56816 mm->mmap = NULL;
56817 mm->mmap_cache = NULL;
56818 - mm->free_area_cache = oldmm->mmap_base;
56819 - mm->cached_hole_size = ~0UL;
56820 + mm->free_area_cache = oldmm->free_area_cache;
56821 + mm->cached_hole_size = oldmm->cached_hole_size;
56822 mm->map_count = 0;
56823 cpumask_clear(mm_cpumask(mm));
56824 mm->mm_rb = RB_ROOT;
56825 @@ -344,8 +409,6 @@ static int dup_mmap(struct mm_struct *mm
56826
56827 prev = NULL;
56828 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56829 - struct file *file;
56830 -
56831 if (mpnt->vm_flags & VM_DONTCOPY) {
56832 long pages = vma_pages(mpnt);
56833 mm->total_vm -= pages;
56834 @@ -353,56 +416,13 @@ static int dup_mmap(struct mm_struct *mm
56835 -pages);
56836 continue;
56837 }
56838 - charge = 0;
56839 - if (mpnt->vm_flags & VM_ACCOUNT) {
56840 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56841 - if (security_vm_enough_memory(len))
56842 - goto fail_nomem;
56843 - charge = len;
56844 - }
56845 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56846 - if (!tmp)
56847 - goto fail_nomem;
56848 - *tmp = *mpnt;
56849 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56850 - pol = mpol_dup(vma_policy(mpnt));
56851 - retval = PTR_ERR(pol);
56852 - if (IS_ERR(pol))
56853 - goto fail_nomem_policy;
56854 - vma_set_policy(tmp, pol);
56855 - tmp->vm_mm = mm;
56856 - if (anon_vma_fork(tmp, mpnt))
56857 - goto fail_nomem_anon_vma_fork;
56858 - tmp->vm_flags &= ~VM_LOCKED;
56859 - tmp->vm_next = tmp->vm_prev = NULL;
56860 - file = tmp->vm_file;
56861 - if (file) {
56862 - struct inode *inode = file->f_path.dentry->d_inode;
56863 - struct address_space *mapping = file->f_mapping;
56864 -
56865 - get_file(file);
56866 - if (tmp->vm_flags & VM_DENYWRITE)
56867 - atomic_dec(&inode->i_writecount);
56868 - spin_lock(&mapping->i_mmap_lock);
56869 - if (tmp->vm_flags & VM_SHARED)
56870 - mapping->i_mmap_writable++;
56871 - tmp->vm_truncate_count = mpnt->vm_truncate_count;
56872 - flush_dcache_mmap_lock(mapping);
56873 - /* insert tmp into the share list, just after mpnt */
56874 - vma_prio_tree_add(tmp, mpnt);
56875 - flush_dcache_mmap_unlock(mapping);
56876 - spin_unlock(&mapping->i_mmap_lock);
56877 + tmp = dup_vma(mm, mpnt);
56878 + if (!tmp) {
56879 + retval = -ENOMEM;
56880 + goto out;
56881 }
56882
56883 /*
56884 - * Clear hugetlb-related page reserves for children. This only
56885 - * affects MAP_PRIVATE mappings. Faults generated by the child
56886 - * are not guaranteed to succeed, even if read-only
56887 - */
56888 - if (is_vm_hugetlb_page(tmp))
56889 - reset_vma_resv_huge_pages(tmp);
56890 -
56891 - /*
56892 * Link in the new vma and copy the page table entries.
56893 */
56894 *pprev = tmp;
56895 @@ -423,6 +443,31 @@ static int dup_mmap(struct mm_struct *mm
56896 if (retval)
56897 goto out;
56898 }
56899 +
56900 +#ifdef CONFIG_PAX_SEGMEXEC
56901 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56902 + struct vm_area_struct *mpnt_m;
56903 +
56904 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56905 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56906 +
56907 + if (!mpnt->vm_mirror)
56908 + continue;
56909 +
56910 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56911 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
56912 + mpnt->vm_mirror = mpnt_m;
56913 + } else {
56914 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
56915 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
56916 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
56917 + mpnt->vm_mirror->vm_mirror = mpnt;
56918 + }
56919 + }
56920 + BUG_ON(mpnt_m);
56921 + }
56922 +#endif
56923 +
56924 /* a new mm has just been created */
56925 arch_dup_mmap(oldmm, mm);
56926 retval = 0;
56927 @@ -431,14 +476,6 @@ out:
56928 flush_tlb_mm(oldmm);
56929 up_write(&oldmm->mmap_sem);
56930 return retval;
56931 -fail_nomem_anon_vma_fork:
56932 - mpol_put(pol);
56933 -fail_nomem_policy:
56934 - kmem_cache_free(vm_area_cachep, tmp);
56935 -fail_nomem:
56936 - retval = -ENOMEM;
56937 - vm_unacct_memory(charge);
56938 - goto out;
56939 }
56940
56941 static inline int mm_alloc_pgd(struct mm_struct * mm)
56942 @@ -785,13 +822,14 @@ static int copy_fs(unsigned long clone_f
56943 spin_unlock(&fs->lock);
56944 return -EAGAIN;
56945 }
56946 - fs->users++;
56947 + atomic_inc(&fs->users);
56948 spin_unlock(&fs->lock);
56949 return 0;
56950 }
56951 tsk->fs = copy_fs_struct(fs);
56952 if (!tsk->fs)
56953 return -ENOMEM;
56954 + gr_set_chroot_entries(tsk, &tsk->fs->root);
56955 return 0;
56956 }
56957
56958 @@ -1049,10 +1087,13 @@ static struct task_struct *copy_process(
56959 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
56960 #endif
56961 retval = -EAGAIN;
56962 +
56963 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
56964 +
56965 if (atomic_read(&p->real_cred->user->processes) >=
56966 task_rlimit(p, RLIMIT_NPROC)) {
56967 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
56968 - p->real_cred->user != INIT_USER)
56969 + if (p->real_cred->user != INIT_USER &&
56970 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
56971 goto bad_fork_free;
56972 }
56973
56974 @@ -1200,6 +1241,8 @@ static struct task_struct *copy_process(
56975 goto bad_fork_free_pid;
56976 }
56977
56978 + gr_copy_label(p);
56979 +
56980 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
56981 /*
56982 * Clear TID on mm_release()?
56983 @@ -1360,6 +1403,8 @@ bad_fork_cleanup_count:
56984 bad_fork_free:
56985 free_task(p);
56986 fork_out:
56987 + gr_log_forkfail(retval);
56988 +
56989 return ERR_PTR(retval);
56990 }
56991
56992 @@ -1448,6 +1493,8 @@ long do_fork(unsigned long clone_flags,
56993 if (clone_flags & CLONE_PARENT_SETTID)
56994 put_user(nr, parent_tidptr);
56995
56996 + gr_handle_brute_check();
56997 +
56998 if (clone_flags & CLONE_VFORK) {
56999 p->vfork_done = &vfork;
57000 init_completion(&vfork);
57001 @@ -1549,7 +1596,7 @@ static int unshare_fs(unsigned long unsh
57002 return 0;
57003
57004 /* don't need lock here; in the worst case we'll do useless copy */
57005 - if (fs->users == 1)
57006 + if (atomic_read(&fs->users) == 1)
57007 return 0;
57008
57009 *new_fsp = copy_fs_struct(fs);
57010 @@ -1636,7 +1683,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
57011 fs = current->fs;
57012 spin_lock(&fs->lock);
57013 current->fs = new_fs;
57014 - if (--fs->users)
57015 + gr_set_chroot_entries(current, &current->fs->root);
57016 + if (atomic_dec_return(&fs->users))
57017 new_fs = NULL;
57018 else
57019 new_fs = fs;
57020 diff -urNp linux-2.6.39.4/kernel/futex.c linux-2.6.39.4/kernel/futex.c
57021 --- linux-2.6.39.4/kernel/futex.c 2011-05-19 00:06:34.000000000 -0400
57022 +++ linux-2.6.39.4/kernel/futex.c 2011-08-05 19:44:37.000000000 -0400
57023 @@ -54,6 +54,7 @@
57024 #include <linux/mount.h>
57025 #include <linux/pagemap.h>
57026 #include <linux/syscalls.h>
57027 +#include <linux/ptrace.h>
57028 #include <linux/signal.h>
57029 #include <linux/module.h>
57030 #include <linux/magic.h>
57031 @@ -236,6 +237,11 @@ get_futex_key(u32 __user *uaddr, int fsh
57032 struct page *page, *page_head;
57033 int err;
57034
57035 +#ifdef CONFIG_PAX_SEGMEXEC
57036 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
57037 + return -EFAULT;
57038 +#endif
57039 +
57040 /*
57041 * The futex address must be "naturally" aligned.
57042 */
57043 @@ -1833,6 +1839,8 @@ static int futex_wait(u32 __user *uaddr,
57044 struct futex_q q = futex_q_init;
57045 int ret;
57046
57047 + pax_track_stack();
57048 +
57049 if (!bitset)
57050 return -EINVAL;
57051 q.bitset = bitset;
57052 @@ -2229,6 +2237,8 @@ static int futex_wait_requeue_pi(u32 __u
57053 struct futex_q q = futex_q_init;
57054 int res, ret;
57055
57056 + pax_track_stack();
57057 +
57058 if (!bitset)
57059 return -EINVAL;
57060
57061 @@ -2401,7 +2411,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57062 {
57063 struct robust_list_head __user *head;
57064 unsigned long ret;
57065 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57066 const struct cred *cred = current_cred(), *pcred;
57067 +#endif
57068
57069 if (!futex_cmpxchg_enabled)
57070 return -ENOSYS;
57071 @@ -2417,6 +2429,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57072 if (!p)
57073 goto err_unlock;
57074 ret = -EPERM;
57075 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57076 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57077 + goto err_unlock;
57078 +#else
57079 pcred = __task_cred(p);
57080 /* If victim is in different user_ns, then uids are not
57081 comparable, so we must have CAP_SYS_PTRACE */
57082 @@ -2431,6 +2447,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57083 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57084 goto err_unlock;
57085 ok:
57086 +#endif
57087 head = p->robust_list;
57088 rcu_read_unlock();
57089 }
57090 @@ -2682,6 +2699,7 @@ static int __init futex_init(void)
57091 {
57092 u32 curval;
57093 int i;
57094 + mm_segment_t oldfs;
57095
57096 /*
57097 * This will fail and we want it. Some arch implementations do
57098 @@ -2693,8 +2711,11 @@ static int __init futex_init(void)
57099 * implementation, the non-functional ones will return
57100 * -ENOSYS.
57101 */
57102 + oldfs = get_fs();
57103 + set_fs(USER_DS);
57104 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57105 futex_cmpxchg_enabled = 1;
57106 + set_fs(oldfs);
57107
57108 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57109 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57110 diff -urNp linux-2.6.39.4/kernel/futex_compat.c linux-2.6.39.4/kernel/futex_compat.c
57111 --- linux-2.6.39.4/kernel/futex_compat.c 2011-05-19 00:06:34.000000000 -0400
57112 +++ linux-2.6.39.4/kernel/futex_compat.c 2011-08-05 19:44:37.000000000 -0400
57113 @@ -10,6 +10,7 @@
57114 #include <linux/compat.h>
57115 #include <linux/nsproxy.h>
57116 #include <linux/futex.h>
57117 +#include <linux/ptrace.h>
57118
57119 #include <asm/uaccess.h>
57120
57121 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57122 {
57123 struct compat_robust_list_head __user *head;
57124 unsigned long ret;
57125 - const struct cred *cred = current_cred(), *pcred;
57126 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57127 + const struct cred *cred = current_cred();
57128 + const struct cred *pcred;
57129 +#endif
57130
57131 if (!futex_cmpxchg_enabled)
57132 return -ENOSYS;
57133 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57134 if (!p)
57135 goto err_unlock;
57136 ret = -EPERM;
57137 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57138 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57139 + goto err_unlock;
57140 +#else
57141 pcred = __task_cred(p);
57142 /* If victim is in different user_ns, then uids are not
57143 comparable, so we must have CAP_SYS_PTRACE */
57144 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57145 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57146 goto err_unlock;
57147 ok:
57148 +#endif
57149 head = p->compat_robust_list;
57150 rcu_read_unlock();
57151 }
57152 diff -urNp linux-2.6.39.4/kernel/gcov/base.c linux-2.6.39.4/kernel/gcov/base.c
57153 --- linux-2.6.39.4/kernel/gcov/base.c 2011-05-19 00:06:34.000000000 -0400
57154 +++ linux-2.6.39.4/kernel/gcov/base.c 2011-08-05 19:44:37.000000000 -0400
57155 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
57156 }
57157
57158 #ifdef CONFIG_MODULES
57159 -static inline int within(void *addr, void *start, unsigned long size)
57160 -{
57161 - return ((addr >= start) && (addr < start + size));
57162 -}
57163 -
57164 /* Update list and generate events when modules are unloaded. */
57165 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57166 void *data)
57167 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57168 prev = NULL;
57169 /* Remove entries located in module from linked list. */
57170 for (info = gcov_info_head; info; info = info->next) {
57171 - if (within(info, mod->module_core, mod->core_size)) {
57172 + if (within_module_core_rw((unsigned long)info, mod)) {
57173 if (prev)
57174 prev->next = info->next;
57175 else
57176 diff -urNp linux-2.6.39.4/kernel/hrtimer.c linux-2.6.39.4/kernel/hrtimer.c
57177 --- linux-2.6.39.4/kernel/hrtimer.c 2011-05-19 00:06:34.000000000 -0400
57178 +++ linux-2.6.39.4/kernel/hrtimer.c 2011-08-05 19:44:37.000000000 -0400
57179 @@ -1383,7 +1383,7 @@ void hrtimer_peek_ahead_timers(void)
57180 local_irq_restore(flags);
57181 }
57182
57183 -static void run_hrtimer_softirq(struct softirq_action *h)
57184 +static void run_hrtimer_softirq(void)
57185 {
57186 hrtimer_peek_ahead_timers();
57187 }
57188 diff -urNp linux-2.6.39.4/kernel/irq/manage.c linux-2.6.39.4/kernel/irq/manage.c
57189 --- linux-2.6.39.4/kernel/irq/manage.c 2011-05-19 00:06:34.000000000 -0400
57190 +++ linux-2.6.39.4/kernel/irq/manage.c 2011-08-05 19:44:37.000000000 -0400
57191 @@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, u
57192 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
57193 int ret = 0;
57194
57195 + if (!desc)
57196 + return -EINVAL;
57197 +
57198 /* wakeup-capable irqs can be shared between drivers that
57199 * don't need to have the same sleep mode behaviors.
57200 */
57201 diff -urNp linux-2.6.39.4/kernel/jump_label.c linux-2.6.39.4/kernel/jump_label.c
57202 --- linux-2.6.39.4/kernel/jump_label.c 2011-05-19 00:06:34.000000000 -0400
57203 +++ linux-2.6.39.4/kernel/jump_label.c 2011-08-05 19:44:37.000000000 -0400
57204 @@ -49,6 +49,17 @@ void jump_label_unlock(void)
57205 mutex_unlock(&jump_label_mutex);
57206 }
57207
57208 +static void jump_label_swap(void *a, void *b, int size)
57209 +{
57210 + struct jump_entry t;
57211 +
57212 + t = *(struct jump_entry *)a;
57213 + pax_open_kernel();
57214 + *(struct jump_entry *)a = *(struct jump_entry *)b;
57215 + *(struct jump_entry *)b = t;
57216 + pax_close_kernel();
57217 +}
57218 +
57219 static int jump_label_cmp(const void *a, const void *b)
57220 {
57221 const struct jump_entry *jea = a;
57222 @@ -70,7 +81,7 @@ sort_jump_label_entries(struct jump_entr
57223
57224 size = (((unsigned long)stop - (unsigned long)start)
57225 / sizeof(struct jump_entry));
57226 - sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57227 + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, jump_label_swap);
57228 }
57229
57230 static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
57231 @@ -407,8 +418,11 @@ static void remove_jump_label_module_ini
57232 count = e_module->nr_entries;
57233 iter = e_module->table;
57234 while (count--) {
57235 - if (within_module_init(iter->code, mod))
57236 + if (within_module_init(iter->code, mod)) {
57237 + pax_open_kernel();
57238 iter->key = 0;
57239 + pax_close_kernel();
57240 + }
57241 iter++;
57242 }
57243 }
57244 diff -urNp linux-2.6.39.4/kernel/kallsyms.c linux-2.6.39.4/kernel/kallsyms.c
57245 --- linux-2.6.39.4/kernel/kallsyms.c 2011-05-19 00:06:34.000000000 -0400
57246 +++ linux-2.6.39.4/kernel/kallsyms.c 2011-08-05 19:44:37.000000000 -0400
57247 @@ -11,6 +11,9 @@
57248 * Changed the compression method from stem compression to "table lookup"
57249 * compression (see scripts/kallsyms.c for a more complete description)
57250 */
57251 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57252 +#define __INCLUDED_BY_HIDESYM 1
57253 +#endif
57254 #include <linux/kallsyms.h>
57255 #include <linux/module.h>
57256 #include <linux/init.h>
57257 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57258
57259 static inline int is_kernel_inittext(unsigned long addr)
57260 {
57261 + if (system_state != SYSTEM_BOOTING)
57262 + return 0;
57263 +
57264 if (addr >= (unsigned long)_sinittext
57265 && addr <= (unsigned long)_einittext)
57266 return 1;
57267 return 0;
57268 }
57269
57270 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57271 +#ifdef CONFIG_MODULES
57272 +static inline int is_module_text(unsigned long addr)
57273 +{
57274 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57275 + return 1;
57276 +
57277 + addr = ktla_ktva(addr);
57278 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57279 +}
57280 +#else
57281 +static inline int is_module_text(unsigned long addr)
57282 +{
57283 + return 0;
57284 +}
57285 +#endif
57286 +#endif
57287 +
57288 static inline int is_kernel_text(unsigned long addr)
57289 {
57290 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57291 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57292
57293 static inline int is_kernel(unsigned long addr)
57294 {
57295 +
57296 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57297 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
57298 + return 1;
57299 +
57300 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57301 +#else
57302 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57303 +#endif
57304 +
57305 return 1;
57306 return in_gate_area_no_mm(addr);
57307 }
57308
57309 static int is_ksym_addr(unsigned long addr)
57310 {
57311 +
57312 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57313 + if (is_module_text(addr))
57314 + return 0;
57315 +#endif
57316 +
57317 if (all_var)
57318 return is_kernel(addr);
57319
57320 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57321
57322 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57323 {
57324 - iter->name[0] = '\0';
57325 iter->nameoff = get_symbol_offset(new_pos);
57326 iter->pos = new_pos;
57327 }
57328 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57329 {
57330 struct kallsym_iter *iter = m->private;
57331
57332 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57333 + if (current_uid())
57334 + return 0;
57335 +#endif
57336 +
57337 /* Some debugging symbols have no name. Ignore them. */
57338 if (!iter->name[0])
57339 return 0;
57340 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57341 struct kallsym_iter *iter;
57342 int ret;
57343
57344 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57345 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57346 if (!iter)
57347 return -ENOMEM;
57348 reset_iter(iter, 0);
57349 diff -urNp linux-2.6.39.4/kernel/kmod.c linux-2.6.39.4/kernel/kmod.c
57350 --- linux-2.6.39.4/kernel/kmod.c 2011-05-19 00:06:34.000000000 -0400
57351 +++ linux-2.6.39.4/kernel/kmod.c 2011-08-05 19:44:37.000000000 -0400
57352 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57353 * If module auto-loading support is disabled then this function
57354 * becomes a no-operation.
57355 */
57356 -int __request_module(bool wait, const char *fmt, ...)
57357 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57358 {
57359 - va_list args;
57360 char module_name[MODULE_NAME_LEN];
57361 unsigned int max_modprobes;
57362 int ret;
57363 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57364 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57365 static char *envp[] = { "HOME=/",
57366 "TERM=linux",
57367 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57368 @@ -80,9 +79,7 @@ int __request_module(bool wait, const ch
57369 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57370 static int kmod_loop_msg;
57371
57372 - va_start(args, fmt);
57373 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57374 - va_end(args);
57375 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57376 if (ret >= MODULE_NAME_LEN)
57377 return -ENAMETOOLONG;
57378
57379 @@ -90,6 +87,20 @@ int __request_module(bool wait, const ch
57380 if (ret)
57381 return ret;
57382
57383 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57384 + if (!current_uid()) {
57385 + /* hack to workaround consolekit/udisks stupidity */
57386 + read_lock(&tasklist_lock);
57387 + if (!strcmp(current->comm, "mount") &&
57388 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57389 + read_unlock(&tasklist_lock);
57390 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57391 + return -EPERM;
57392 + }
57393 + read_unlock(&tasklist_lock);
57394 + }
57395 +#endif
57396 +
57397 /* If modprobe needs a service that is in a module, we get a recursive
57398 * loop. Limit the number of running kmod threads to max_threads/2 or
57399 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57400 @@ -123,6 +134,47 @@ int __request_module(bool wait, const ch
57401 atomic_dec(&kmod_concurrent);
57402 return ret;
57403 }
57404 +
57405 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57406 +{
57407 + va_list args;
57408 + int ret;
57409 +
57410 + va_start(args, fmt);
57411 + ret = ____request_module(wait, module_param, fmt, args);
57412 + va_end(args);
57413 +
57414 + return ret;
57415 +}
57416 +
57417 +int __request_module(bool wait, const char *fmt, ...)
57418 +{
57419 + va_list args;
57420 + int ret;
57421 +
57422 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57423 + if (current_uid()) {
57424 + char module_param[MODULE_NAME_LEN];
57425 +
57426 + memset(module_param, 0, sizeof(module_param));
57427 +
57428 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57429 +
57430 + va_start(args, fmt);
57431 + ret = ____request_module(wait, module_param, fmt, args);
57432 + va_end(args);
57433 +
57434 + return ret;
57435 + }
57436 +#endif
57437 +
57438 + va_start(args, fmt);
57439 + ret = ____request_module(wait, NULL, fmt, args);
57440 + va_end(args);
57441 +
57442 + return ret;
57443 +}
57444 +
57445 EXPORT_SYMBOL(__request_module);
57446 #endif /* CONFIG_MODULES */
57447
57448 diff -urNp linux-2.6.39.4/kernel/kprobes.c linux-2.6.39.4/kernel/kprobes.c
57449 --- linux-2.6.39.4/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
57450 +++ linux-2.6.39.4/kernel/kprobes.c 2011-08-05 19:44:37.000000000 -0400
57451 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57452 * kernel image and loaded module images reside. This is required
57453 * so x86_64 can correctly handle the %rip-relative fixups.
57454 */
57455 - kip->insns = module_alloc(PAGE_SIZE);
57456 + kip->insns = module_alloc_exec(PAGE_SIZE);
57457 if (!kip->insns) {
57458 kfree(kip);
57459 return NULL;
57460 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57461 */
57462 if (!list_is_singular(&kip->list)) {
57463 list_del(&kip->list);
57464 - module_free(NULL, kip->insns);
57465 + module_free_exec(NULL, kip->insns);
57466 kfree(kip);
57467 }
57468 return 1;
57469 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57470 {
57471 int i, err = 0;
57472 unsigned long offset = 0, size = 0;
57473 - char *modname, namebuf[128];
57474 + char *modname, namebuf[KSYM_NAME_LEN];
57475 const char *symbol_name;
57476 void *addr;
57477 struct kprobe_blackpoint *kb;
57478 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57479 const char *sym = NULL;
57480 unsigned int i = *(loff_t *) v;
57481 unsigned long offset = 0;
57482 - char *modname, namebuf[128];
57483 + char *modname, namebuf[KSYM_NAME_LEN];
57484
57485 head = &kprobe_table[i];
57486 preempt_disable();
57487 diff -urNp linux-2.6.39.4/kernel/lockdep.c linux-2.6.39.4/kernel/lockdep.c
57488 --- linux-2.6.39.4/kernel/lockdep.c 2011-06-25 12:55:23.000000000 -0400
57489 +++ linux-2.6.39.4/kernel/lockdep.c 2011-08-05 19:44:37.000000000 -0400
57490 @@ -571,6 +571,10 @@ static int static_obj(void *obj)
57491 end = (unsigned long) &_end,
57492 addr = (unsigned long) obj;
57493
57494 +#ifdef CONFIG_PAX_KERNEXEC
57495 + start = ktla_ktva(start);
57496 +#endif
57497 +
57498 /*
57499 * static variable?
57500 */
57501 @@ -706,6 +710,7 @@ register_lock_class(struct lockdep_map *
57502 if (!static_obj(lock->key)) {
57503 debug_locks_off();
57504 printk("INFO: trying to register non-static key.\n");
57505 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57506 printk("the code is fine but needs lockdep annotation.\n");
57507 printk("turning off the locking correctness validator.\n");
57508 dump_stack();
57509 @@ -2752,7 +2757,7 @@ static int __lock_acquire(struct lockdep
57510 if (!class)
57511 return 0;
57512 }
57513 - atomic_inc((atomic_t *)&class->ops);
57514 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57515 if (very_verbose(class)) {
57516 printk("\nacquire class [%p] %s", class->key, class->name);
57517 if (class->name_version > 1)
57518 diff -urNp linux-2.6.39.4/kernel/lockdep_proc.c linux-2.6.39.4/kernel/lockdep_proc.c
57519 --- linux-2.6.39.4/kernel/lockdep_proc.c 2011-05-19 00:06:34.000000000 -0400
57520 +++ linux-2.6.39.4/kernel/lockdep_proc.c 2011-08-05 19:44:37.000000000 -0400
57521 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57522
57523 static void print_name(struct seq_file *m, struct lock_class *class)
57524 {
57525 - char str[128];
57526 + char str[KSYM_NAME_LEN];
57527 const char *name = class->name;
57528
57529 if (!name) {
57530 diff -urNp linux-2.6.39.4/kernel/module.c linux-2.6.39.4/kernel/module.c
57531 --- linux-2.6.39.4/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
57532 +++ linux-2.6.39.4/kernel/module.c 2011-08-05 19:44:37.000000000 -0400
57533 @@ -57,6 +57,7 @@
57534 #include <linux/kmemleak.h>
57535 #include <linux/jump_label.h>
57536 #include <linux/pfn.h>
57537 +#include <linux/grsecurity.h>
57538
57539 #define CREATE_TRACE_POINTS
57540 #include <trace/events/module.h>
57541 @@ -118,7 +119,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57542
57543 /* Bounds of module allocation, for speeding __module_address.
57544 * Protected by module_mutex. */
57545 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57546 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57547 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57548
57549 int register_module_notifier(struct notifier_block * nb)
57550 {
57551 @@ -282,7 +284,7 @@ bool each_symbol(bool (*fn)(const struct
57552 return true;
57553
57554 list_for_each_entry_rcu(mod, &modules, list) {
57555 - struct symsearch arr[] = {
57556 + struct symsearch modarr[] = {
57557 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57558 NOT_GPL_ONLY, false },
57559 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57560 @@ -304,7 +306,7 @@ bool each_symbol(bool (*fn)(const struct
57561 #endif
57562 };
57563
57564 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57565 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57566 return true;
57567 }
57568 return false;
57569 @@ -415,7 +417,7 @@ static inline void __percpu *mod_percpu(
57570 static int percpu_modalloc(struct module *mod,
57571 unsigned long size, unsigned long align)
57572 {
57573 - if (align > PAGE_SIZE) {
57574 + if (align-1 >= PAGE_SIZE) {
57575 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57576 mod->name, align, PAGE_SIZE);
57577 align = PAGE_SIZE;
57578 @@ -1143,7 +1145,7 @@ resolve_symbol_wait(struct module *mod,
57579 */
57580 #ifdef CONFIG_SYSFS
57581
57582 -#ifdef CONFIG_KALLSYMS
57583 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57584 static inline bool sect_empty(const Elf_Shdr *sect)
57585 {
57586 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57587 @@ -1612,17 +1614,17 @@ void unset_section_ro_nx(struct module *
57588 {
57589 unsigned long total_pages;
57590
57591 - if (mod->module_core == module_region) {
57592 + if (mod->module_core_rx == module_region) {
57593 /* Set core as NX+RW */
57594 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
57595 - set_memory_nx((unsigned long)mod->module_core, total_pages);
57596 - set_memory_rw((unsigned long)mod->module_core, total_pages);
57597 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_core_rx, mod->core_size_rx);
57598 + set_memory_nx((unsigned long)mod->module_core_rx, total_pages);
57599 + set_memory_rw((unsigned long)mod->module_core_rx, total_pages);
57600
57601 - } else if (mod->module_init == module_region) {
57602 + } else if (mod->module_init_rx == module_region) {
57603 /* Set init as NX+RW */
57604 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
57605 - set_memory_nx((unsigned long)mod->module_init, total_pages);
57606 - set_memory_rw((unsigned long)mod->module_init, total_pages);
57607 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_init_rx, mod->init_size_rx);
57608 + set_memory_nx((unsigned long)mod->module_init_rx, total_pages);
57609 + set_memory_rw((unsigned long)mod->module_init_rx, total_pages);
57610 }
57611 }
57612
57613 @@ -1633,14 +1635,14 @@ void set_all_modules_text_rw()
57614
57615 mutex_lock(&module_mutex);
57616 list_for_each_entry_rcu(mod, &modules, list) {
57617 - if ((mod->module_core) && (mod->core_text_size)) {
57618 - set_page_attributes(mod->module_core,
57619 - mod->module_core + mod->core_text_size,
57620 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57621 + set_page_attributes(mod->module_core_rx,
57622 + mod->module_core_rx + mod->core_size_rx,
57623 set_memory_rw);
57624 }
57625 - if ((mod->module_init) && (mod->init_text_size)) {
57626 - set_page_attributes(mod->module_init,
57627 - mod->module_init + mod->init_text_size,
57628 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57629 + set_page_attributes(mod->module_init_rx,
57630 + mod->module_init_rx + mod->init_size_rx,
57631 set_memory_rw);
57632 }
57633 }
57634 @@ -1654,14 +1656,14 @@ void set_all_modules_text_ro()
57635
57636 mutex_lock(&module_mutex);
57637 list_for_each_entry_rcu(mod, &modules, list) {
57638 - if ((mod->module_core) && (mod->core_text_size)) {
57639 - set_page_attributes(mod->module_core,
57640 - mod->module_core + mod->core_text_size,
57641 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57642 + set_page_attributes(mod->module_core_rx,
57643 + mod->module_core_rx + mod->core_size_rx,
57644 set_memory_ro);
57645 }
57646 - if ((mod->module_init) && (mod->init_text_size)) {
57647 - set_page_attributes(mod->module_init,
57648 - mod->module_init + mod->init_text_size,
57649 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57650 + set_page_attributes(mod->module_init_rx,
57651 + mod->module_init_rx + mod->init_size_rx,
57652 set_memory_ro);
57653 }
57654 }
57655 @@ -1696,17 +1698,20 @@ static void free_module(struct module *m
57656 destroy_params(mod->kp, mod->num_kp);
57657
57658 /* This may be NULL, but that's OK */
57659 - unset_section_ro_nx(mod, mod->module_init);
57660 - module_free(mod, mod->module_init);
57661 + unset_section_ro_nx(mod, mod->module_init_rx);
57662 + module_free(mod, mod->module_init_rw);
57663 + module_free_exec(mod, mod->module_init_rx);
57664 kfree(mod->args);
57665 percpu_modfree(mod);
57666
57667 /* Free lock-classes: */
57668 - lockdep_free_key_range(mod->module_core, mod->core_size);
57669 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57670 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57671
57672 /* Finally, free the core (containing the module structure) */
57673 - unset_section_ro_nx(mod, mod->module_core);
57674 - module_free(mod, mod->module_core);
57675 + unset_section_ro_nx(mod, mod->module_core_rx);
57676 + module_free_exec(mod, mod->module_core_rx);
57677 + module_free(mod, mod->module_core_rw);
57678
57679 #ifdef CONFIG_MPU
57680 update_protections(current->mm);
57681 @@ -1775,10 +1780,31 @@ static int simplify_symbols(struct modul
57682 unsigned int i;
57683 int ret = 0;
57684 const struct kernel_symbol *ksym;
57685 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57686 + int is_fs_load = 0;
57687 + int register_filesystem_found = 0;
57688 + char *p;
57689 +
57690 + p = strstr(mod->args, "grsec_modharden_fs");
57691 + if (p) {
57692 + char *endptr = p + strlen("grsec_modharden_fs");
57693 + /* copy \0 as well */
57694 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57695 + is_fs_load = 1;
57696 + }
57697 +#endif
57698
57699 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57700 const char *name = info->strtab + sym[i].st_name;
57701
57702 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57703 + /* it's a real shame this will never get ripped and copied
57704 + upstream! ;(
57705 + */
57706 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57707 + register_filesystem_found = 1;
57708 +#endif
57709 +
57710 switch (sym[i].st_shndx) {
57711 case SHN_COMMON:
57712 /* We compiled with -fno-common. These are not
57713 @@ -1799,7 +1825,9 @@ static int simplify_symbols(struct modul
57714 ksym = resolve_symbol_wait(mod, info, name);
57715 /* Ok if resolved. */
57716 if (ksym && !IS_ERR(ksym)) {
57717 + pax_open_kernel();
57718 sym[i].st_value = ksym->value;
57719 + pax_close_kernel();
57720 break;
57721 }
57722
57723 @@ -1818,11 +1846,20 @@ static int simplify_symbols(struct modul
57724 secbase = (unsigned long)mod_percpu(mod);
57725 else
57726 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57727 + pax_open_kernel();
57728 sym[i].st_value += secbase;
57729 + pax_close_kernel();
57730 break;
57731 }
57732 }
57733
57734 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57735 + if (is_fs_load && !register_filesystem_found) {
57736 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57737 + ret = -EPERM;
57738 + }
57739 +#endif
57740 +
57741 return ret;
57742 }
57743
57744 @@ -1906,22 +1943,12 @@ static void layout_sections(struct modul
57745 || s->sh_entsize != ~0UL
57746 || strstarts(sname, ".init"))
57747 continue;
57748 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57749 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57750 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57751 + else
57752 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57753 DEBUGP("\t%s\n", name);
57754 }
57755 - switch (m) {
57756 - case 0: /* executable */
57757 - mod->core_size = debug_align(mod->core_size);
57758 - mod->core_text_size = mod->core_size;
57759 - break;
57760 - case 1: /* RO: text and ro-data */
57761 - mod->core_size = debug_align(mod->core_size);
57762 - mod->core_ro_size = mod->core_size;
57763 - break;
57764 - case 3: /* whole core */
57765 - mod->core_size = debug_align(mod->core_size);
57766 - break;
57767 - }
57768 }
57769
57770 DEBUGP("Init section allocation order:\n");
57771 @@ -1935,23 +1962,13 @@ static void layout_sections(struct modul
57772 || s->sh_entsize != ~0UL
57773 || !strstarts(sname, ".init"))
57774 continue;
57775 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57776 - | INIT_OFFSET_MASK);
57777 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57778 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57779 + else
57780 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57781 + s->sh_entsize |= INIT_OFFSET_MASK;
57782 DEBUGP("\t%s\n", sname);
57783 }
57784 - switch (m) {
57785 - case 0: /* executable */
57786 - mod->init_size = debug_align(mod->init_size);
57787 - mod->init_text_size = mod->init_size;
57788 - break;
57789 - case 1: /* RO: text and ro-data */
57790 - mod->init_size = debug_align(mod->init_size);
57791 - mod->init_ro_size = mod->init_size;
57792 - break;
57793 - case 3: /* whole init */
57794 - mod->init_size = debug_align(mod->init_size);
57795 - break;
57796 - }
57797 }
57798 }
57799
57800 @@ -2119,7 +2136,7 @@ static void layout_symtab(struct module
57801
57802 /* Put symbol section at end of init part of module. */
57803 symsect->sh_flags |= SHF_ALLOC;
57804 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57805 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57806 info->index.sym) | INIT_OFFSET_MASK;
57807 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57808
57809 @@ -2136,19 +2153,19 @@ static void layout_symtab(struct module
57810 }
57811
57812 /* Append room for core symbols at end of core part. */
57813 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57814 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57815 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57816 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57817
57818 /* Put string table section at end of init part of module. */
57819 strsect->sh_flags |= SHF_ALLOC;
57820 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57821 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57822 info->index.str) | INIT_OFFSET_MASK;
57823 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57824
57825 /* Append room for core symbols' strings at end of core part. */
57826 - info->stroffs = mod->core_size;
57827 + info->stroffs = mod->core_size_rx;
57828 __set_bit(0, info->strmap);
57829 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57830 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57831 }
57832
57833 static void add_kallsyms(struct module *mod, const struct load_info *info)
57834 @@ -2164,11 +2181,13 @@ static void add_kallsyms(struct module *
57835 /* Make sure we get permanent strtab: don't use info->strtab. */
57836 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57837
57838 + pax_open_kernel();
57839 +
57840 /* Set types up while we still have access to sections. */
57841 for (i = 0; i < mod->num_symtab; i++)
57842 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57843
57844 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57845 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57846 src = mod->symtab;
57847 *dst = *src;
57848 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57849 @@ -2181,10 +2200,12 @@ static void add_kallsyms(struct module *
57850 }
57851 mod->core_num_syms = ndst;
57852
57853 - mod->core_strtab = s = mod->module_core + info->stroffs;
57854 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57855 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57856 if (test_bit(i, info->strmap))
57857 *++s = mod->strtab[i];
57858 +
57859 + pax_close_kernel();
57860 }
57861 #else
57862 static inline void layout_symtab(struct module *mod, struct load_info *info)
57863 @@ -2213,17 +2234,33 @@ static void dynamic_debug_remove(struct
57864 ddebug_remove_module(debug->modname);
57865 }
57866
57867 -static void *module_alloc_update_bounds(unsigned long size)
57868 +static void *module_alloc_update_bounds_rw(unsigned long size)
57869 {
57870 void *ret = module_alloc(size);
57871
57872 if (ret) {
57873 mutex_lock(&module_mutex);
57874 /* Update module bounds. */
57875 - if ((unsigned long)ret < module_addr_min)
57876 - module_addr_min = (unsigned long)ret;
57877 - if ((unsigned long)ret + size > module_addr_max)
57878 - module_addr_max = (unsigned long)ret + size;
57879 + if ((unsigned long)ret < module_addr_min_rw)
57880 + module_addr_min_rw = (unsigned long)ret;
57881 + if ((unsigned long)ret + size > module_addr_max_rw)
57882 + module_addr_max_rw = (unsigned long)ret + size;
57883 + mutex_unlock(&module_mutex);
57884 + }
57885 + return ret;
57886 +}
57887 +
57888 +static void *module_alloc_update_bounds_rx(unsigned long size)
57889 +{
57890 + void *ret = module_alloc_exec(size);
57891 +
57892 + if (ret) {
57893 + mutex_lock(&module_mutex);
57894 + /* Update module bounds. */
57895 + if ((unsigned long)ret < module_addr_min_rx)
57896 + module_addr_min_rx = (unsigned long)ret;
57897 + if ((unsigned long)ret + size > module_addr_max_rx)
57898 + module_addr_max_rx = (unsigned long)ret + size;
57899 mutex_unlock(&module_mutex);
57900 }
57901 return ret;
57902 @@ -2516,7 +2553,7 @@ static int move_module(struct module *mo
57903 void *ptr;
57904
57905 /* Do the allocs. */
57906 - ptr = module_alloc_update_bounds(mod->core_size);
57907 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57908 /*
57909 * The pointer to this block is stored in the module structure
57910 * which is inside the block. Just mark it as not being a
57911 @@ -2526,23 +2563,50 @@ static int move_module(struct module *mo
57912 if (!ptr)
57913 return -ENOMEM;
57914
57915 - memset(ptr, 0, mod->core_size);
57916 - mod->module_core = ptr;
57917 + memset(ptr, 0, mod->core_size_rw);
57918 + mod->module_core_rw = ptr;
57919
57920 - ptr = module_alloc_update_bounds(mod->init_size);
57921 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
57922 /*
57923 * The pointer to this block is stored in the module structure
57924 * which is inside the block. This block doesn't need to be
57925 * scanned as it contains data and code that will be freed
57926 * after the module is initialized.
57927 */
57928 - kmemleak_ignore(ptr);
57929 - if (!ptr && mod->init_size) {
57930 - module_free(mod, mod->module_core);
57931 + kmemleak_not_leak(ptr);
57932 + if (!ptr && mod->init_size_rw) {
57933 + module_free(mod, mod->module_core_rw);
57934 return -ENOMEM;
57935 }
57936 - memset(ptr, 0, mod->init_size);
57937 - mod->module_init = ptr;
57938 + memset(ptr, 0, mod->init_size_rw);
57939 + mod->module_init_rw = ptr;
57940 +
57941 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
57942 + kmemleak_not_leak(ptr);
57943 + if (!ptr) {
57944 + module_free(mod, mod->module_init_rw);
57945 + module_free(mod, mod->module_core_rw);
57946 + return -ENOMEM;
57947 + }
57948 +
57949 + pax_open_kernel();
57950 + memset(ptr, 0, mod->core_size_rx);
57951 + pax_close_kernel();
57952 + mod->module_core_rx = ptr;
57953 +
57954 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
57955 + kmemleak_not_leak(ptr);
57956 + if (!ptr && mod->init_size_rx) {
57957 + module_free_exec(mod, mod->module_core_rx);
57958 + module_free(mod, mod->module_init_rw);
57959 + module_free(mod, mod->module_core_rw);
57960 + return -ENOMEM;
57961 + }
57962 +
57963 + pax_open_kernel();
57964 + memset(ptr, 0, mod->init_size_rx);
57965 + pax_close_kernel();
57966 + mod->module_init_rx = ptr;
57967
57968 /* Transfer each section which specifies SHF_ALLOC */
57969 DEBUGP("final section addresses:\n");
57970 @@ -2553,16 +2617,45 @@ static int move_module(struct module *mo
57971 if (!(shdr->sh_flags & SHF_ALLOC))
57972 continue;
57973
57974 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
57975 - dest = mod->module_init
57976 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57977 - else
57978 - dest = mod->module_core + shdr->sh_entsize;
57979 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
57980 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57981 + dest = mod->module_init_rw
57982 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57983 + else
57984 + dest = mod->module_init_rx
57985 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
57986 + } else {
57987 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
57988 + dest = mod->module_core_rw + shdr->sh_entsize;
57989 + else
57990 + dest = mod->module_core_rx + shdr->sh_entsize;
57991 + }
57992 +
57993 + if (shdr->sh_type != SHT_NOBITS) {
57994 +
57995 +#ifdef CONFIG_PAX_KERNEXEC
57996 +#ifdef CONFIG_X86_64
57997 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
57998 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
57999 +#endif
58000 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
58001 + pax_open_kernel();
58002 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58003 + pax_close_kernel();
58004 + } else
58005 +#endif
58006
58007 - if (shdr->sh_type != SHT_NOBITS)
58008 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58009 + }
58010 /* Update sh_addr to point to copy in image. */
58011 - shdr->sh_addr = (unsigned long)dest;
58012 +
58013 +#ifdef CONFIG_PAX_KERNEXEC
58014 + if (shdr->sh_flags & SHF_EXECINSTR)
58015 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
58016 + else
58017 +#endif
58018 +
58019 + shdr->sh_addr = (unsigned long)dest;
58020 DEBUGP("\t0x%lx %s\n",
58021 shdr->sh_addr, info->secstrings + shdr->sh_name);
58022 }
58023 @@ -2613,12 +2706,12 @@ static void flush_module_icache(const st
58024 * Do it before processing of module parameters, so the module
58025 * can provide parameter accessor functions of its own.
58026 */
58027 - if (mod->module_init)
58028 - flush_icache_range((unsigned long)mod->module_init,
58029 - (unsigned long)mod->module_init
58030 - + mod->init_size);
58031 - flush_icache_range((unsigned long)mod->module_core,
58032 - (unsigned long)mod->module_core + mod->core_size);
58033 + if (mod->module_init_rx)
58034 + flush_icache_range((unsigned long)mod->module_init_rx,
58035 + (unsigned long)mod->module_init_rx
58036 + + mod->init_size_rx);
58037 + flush_icache_range((unsigned long)mod->module_core_rx,
58038 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
58039
58040 set_fs(old_fs);
58041 }
58042 @@ -2690,8 +2783,10 @@ static void module_deallocate(struct mod
58043 {
58044 kfree(info->strmap);
58045 percpu_modfree(mod);
58046 - module_free(mod, mod->module_init);
58047 - module_free(mod, mod->module_core);
58048 + module_free_exec(mod, mod->module_init_rx);
58049 + module_free_exec(mod, mod->module_core_rx);
58050 + module_free(mod, mod->module_init_rw);
58051 + module_free(mod, mod->module_core_rw);
58052 }
58053
58054 static int post_relocation(struct module *mod, const struct load_info *info)
58055 @@ -2748,9 +2843,38 @@ static struct module *load_module(void _
58056 if (err)
58057 goto free_unload;
58058
58059 + /* Now copy in args */
58060 + mod->args = strndup_user(uargs, ~0UL >> 1);
58061 + if (IS_ERR(mod->args)) {
58062 + err = PTR_ERR(mod->args);
58063 + goto free_unload;
58064 + }
58065 +
58066 /* Set up MODINFO_ATTR fields */
58067 setup_modinfo(mod, &info);
58068
58069 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
58070 + {
58071 + char *p, *p2;
58072 +
58073 + if (strstr(mod->args, "grsec_modharden_netdev")) {
58074 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
58075 + err = -EPERM;
58076 + goto free_modinfo;
58077 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
58078 + p += strlen("grsec_modharden_normal");
58079 + p2 = strstr(p, "_");
58080 + if (p2) {
58081 + *p2 = '\0';
58082 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
58083 + *p2 = '_';
58084 + }
58085 + err = -EPERM;
58086 + goto free_modinfo;
58087 + }
58088 + }
58089 +#endif
58090 +
58091 /* Fix up syms, so that st_value is a pointer to location. */
58092 err = simplify_symbols(mod, &info);
58093 if (err < 0)
58094 @@ -2766,13 +2890,6 @@ static struct module *load_module(void _
58095
58096 flush_module_icache(mod);
58097
58098 - /* Now copy in args */
58099 - mod->args = strndup_user(uargs, ~0UL >> 1);
58100 - if (IS_ERR(mod->args)) {
58101 - err = PTR_ERR(mod->args);
58102 - goto free_arch_cleanup;
58103 - }
58104 -
58105 /* Mark state as coming so strong_try_module_get() ignores us. */
58106 mod->state = MODULE_STATE_COMING;
58107
58108 @@ -2832,11 +2949,10 @@ static struct module *load_module(void _
58109 unlock:
58110 mutex_unlock(&module_mutex);
58111 synchronize_sched();
58112 - kfree(mod->args);
58113 - free_arch_cleanup:
58114 module_arch_cleanup(mod);
58115 free_modinfo:
58116 free_modinfo(mod);
58117 + kfree(mod->args);
58118 free_unload:
58119 module_unload_free(mod);
58120 free_module:
58121 @@ -2877,16 +2993,16 @@ SYSCALL_DEFINE3(init_module, void __user
58122 MODULE_STATE_COMING, mod);
58123
58124 /* Set RO and NX regions for core */
58125 - set_section_ro_nx(mod->module_core,
58126 - mod->core_text_size,
58127 - mod->core_ro_size,
58128 - mod->core_size);
58129 + set_section_ro_nx(mod->module_core_rx,
58130 + mod->core_size_rx,
58131 + mod->core_size_rx,
58132 + mod->core_size_rx);
58133
58134 /* Set RO and NX regions for init */
58135 - set_section_ro_nx(mod->module_init,
58136 - mod->init_text_size,
58137 - mod->init_ro_size,
58138 - mod->init_size);
58139 + set_section_ro_nx(mod->module_init_rx,
58140 + mod->init_size_rx,
58141 + mod->init_size_rx,
58142 + mod->init_size_rx);
58143
58144 do_mod_ctors(mod);
58145 /* Start the module */
58146 @@ -2931,11 +3047,13 @@ SYSCALL_DEFINE3(init_module, void __user
58147 mod->symtab = mod->core_symtab;
58148 mod->strtab = mod->core_strtab;
58149 #endif
58150 - unset_section_ro_nx(mod, mod->module_init);
58151 - module_free(mod, mod->module_init);
58152 - mod->module_init = NULL;
58153 - mod->init_size = 0;
58154 - mod->init_text_size = 0;
58155 + unset_section_ro_nx(mod, mod->module_init_rx);
58156 + module_free(mod, mod->module_init_rw);
58157 + module_free_exec(mod, mod->module_init_rx);
58158 + mod->module_init_rw = NULL;
58159 + mod->module_init_rx = NULL;
58160 + mod->init_size_rw = 0;
58161 + mod->init_size_rx = 0;
58162 mutex_unlock(&module_mutex);
58163
58164 return 0;
58165 @@ -2966,10 +3084,16 @@ static const char *get_ksymbol(struct mo
58166 unsigned long nextval;
58167
58168 /* At worse, next value is at end of module */
58169 - if (within_module_init(addr, mod))
58170 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
58171 + if (within_module_init_rx(addr, mod))
58172 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58173 + else if (within_module_init_rw(addr, mod))
58174 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58175 + else if (within_module_core_rx(addr, mod))
58176 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58177 + else if (within_module_core_rw(addr, mod))
58178 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58179 else
58180 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
58181 + return NULL;
58182
58183 /* Scan for closest preceding symbol, and next symbol. (ELF
58184 starts real symbols at 1). */
58185 @@ -3215,7 +3339,7 @@ static int m_show(struct seq_file *m, vo
58186 char buf[8];
58187
58188 seq_printf(m, "%s %u",
58189 - mod->name, mod->init_size + mod->core_size);
58190 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58191 print_unload_info(m, mod);
58192
58193 /* Informative for users. */
58194 @@ -3224,7 +3348,7 @@ static int m_show(struct seq_file *m, vo
58195 mod->state == MODULE_STATE_COMING ? "Loading":
58196 "Live");
58197 /* Used by oprofile and other similar tools. */
58198 - seq_printf(m, " 0x%pK", mod->module_core);
58199 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58200
58201 /* Taints info */
58202 if (mod->taints)
58203 @@ -3260,7 +3384,17 @@ static const struct file_operations proc
58204
58205 static int __init proc_modules_init(void)
58206 {
58207 +#ifndef CONFIG_GRKERNSEC_HIDESYM
58208 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58209 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58210 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58211 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58212 +#else
58213 proc_create("modules", 0, NULL, &proc_modules_operations);
58214 +#endif
58215 +#else
58216 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58217 +#endif
58218 return 0;
58219 }
58220 module_init(proc_modules_init);
58221 @@ -3319,12 +3453,12 @@ struct module *__module_address(unsigned
58222 {
58223 struct module *mod;
58224
58225 - if (addr < module_addr_min || addr > module_addr_max)
58226 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58227 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
58228 return NULL;
58229
58230 list_for_each_entry_rcu(mod, &modules, list)
58231 - if (within_module_core(addr, mod)
58232 - || within_module_init(addr, mod))
58233 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
58234 return mod;
58235 return NULL;
58236 }
58237 @@ -3358,11 +3492,20 @@ bool is_module_text_address(unsigned lon
58238 */
58239 struct module *__module_text_address(unsigned long addr)
58240 {
58241 - struct module *mod = __module_address(addr);
58242 + struct module *mod;
58243 +
58244 +#ifdef CONFIG_X86_32
58245 + addr = ktla_ktva(addr);
58246 +#endif
58247 +
58248 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58249 + return NULL;
58250 +
58251 + mod = __module_address(addr);
58252 +
58253 if (mod) {
58254 /* Make sure it's within the text section. */
58255 - if (!within(addr, mod->module_init, mod->init_text_size)
58256 - && !within(addr, mod->module_core, mod->core_text_size))
58257 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58258 mod = NULL;
58259 }
58260 return mod;
58261 diff -urNp linux-2.6.39.4/kernel/mutex.c linux-2.6.39.4/kernel/mutex.c
58262 --- linux-2.6.39.4/kernel/mutex.c 2011-05-19 00:06:34.000000000 -0400
58263 +++ linux-2.6.39.4/kernel/mutex.c 2011-08-05 19:44:37.000000000 -0400
58264 @@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock,
58265 */
58266
58267 for (;;) {
58268 - struct thread_info *owner;
58269 + struct task_struct *owner;
58270
58271 /*
58272 * If we own the BKL, then don't spin. The owner of
58273 @@ -205,7 +205,7 @@ __mutex_lock_common(struct mutex *lock,
58274 spin_lock_mutex(&lock->wait_lock, flags);
58275
58276 debug_mutex_lock_common(lock, &waiter);
58277 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58278 + debug_mutex_add_waiter(lock, &waiter, task);
58279
58280 /* add waiting tasks to the end of the waitqueue (FIFO): */
58281 list_add_tail(&waiter.list, &lock->wait_list);
58282 @@ -234,8 +234,7 @@ __mutex_lock_common(struct mutex *lock,
58283 * TASK_UNINTERRUPTIBLE case.)
58284 */
58285 if (unlikely(signal_pending_state(state, task))) {
58286 - mutex_remove_waiter(lock, &waiter,
58287 - task_thread_info(task));
58288 + mutex_remove_waiter(lock, &waiter, task);
58289 mutex_release(&lock->dep_map, 1, ip);
58290 spin_unlock_mutex(&lock->wait_lock, flags);
58291
58292 @@ -256,7 +255,7 @@ __mutex_lock_common(struct mutex *lock,
58293 done:
58294 lock_acquired(&lock->dep_map, ip);
58295 /* got the lock - rejoice! */
58296 - mutex_remove_waiter(lock, &waiter, current_thread_info());
58297 + mutex_remove_waiter(lock, &waiter, task);
58298 mutex_set_owner(lock);
58299
58300 /* set it to 0 if there are no waiters left: */
58301 diff -urNp linux-2.6.39.4/kernel/mutex-debug.c linux-2.6.39.4/kernel/mutex-debug.c
58302 --- linux-2.6.39.4/kernel/mutex-debug.c 2011-05-19 00:06:34.000000000 -0400
58303 +++ linux-2.6.39.4/kernel/mutex-debug.c 2011-08-05 19:44:37.000000000 -0400
58304 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58305 }
58306
58307 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58308 - struct thread_info *ti)
58309 + struct task_struct *task)
58310 {
58311 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58312
58313 /* Mark the current thread as blocked on the lock: */
58314 - ti->task->blocked_on = waiter;
58315 + task->blocked_on = waiter;
58316 }
58317
58318 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58319 - struct thread_info *ti)
58320 + struct task_struct *task)
58321 {
58322 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58323 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58324 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58325 - ti->task->blocked_on = NULL;
58326 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
58327 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58328 + task->blocked_on = NULL;
58329
58330 list_del_init(&waiter->list);
58331 waiter->task = NULL;
58332 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
58333 return;
58334
58335 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
58336 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
58337 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
58338 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
58339 mutex_clear_owner(lock);
58340 }
58341 diff -urNp linux-2.6.39.4/kernel/mutex-debug.h linux-2.6.39.4/kernel/mutex-debug.h
58342 --- linux-2.6.39.4/kernel/mutex-debug.h 2011-05-19 00:06:34.000000000 -0400
58343 +++ linux-2.6.39.4/kernel/mutex-debug.h 2011-08-05 19:44:37.000000000 -0400
58344 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
58345 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58346 extern void debug_mutex_add_waiter(struct mutex *lock,
58347 struct mutex_waiter *waiter,
58348 - struct thread_info *ti);
58349 + struct task_struct *task);
58350 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58351 - struct thread_info *ti);
58352 + struct task_struct *task);
58353 extern void debug_mutex_unlock(struct mutex *lock);
58354 extern void debug_mutex_init(struct mutex *lock, const char *name,
58355 struct lock_class_key *key);
58356
58357 static inline void mutex_set_owner(struct mutex *lock)
58358 {
58359 - lock->owner = current_thread_info();
58360 + lock->owner = current;
58361 }
58362
58363 static inline void mutex_clear_owner(struct mutex *lock)
58364 diff -urNp linux-2.6.39.4/kernel/mutex.h linux-2.6.39.4/kernel/mutex.h
58365 --- linux-2.6.39.4/kernel/mutex.h 2011-05-19 00:06:34.000000000 -0400
58366 +++ linux-2.6.39.4/kernel/mutex.h 2011-08-05 19:44:37.000000000 -0400
58367 @@ -19,7 +19,7 @@
58368 #ifdef CONFIG_SMP
58369 static inline void mutex_set_owner(struct mutex *lock)
58370 {
58371 - lock->owner = current_thread_info();
58372 + lock->owner = current;
58373 }
58374
58375 static inline void mutex_clear_owner(struct mutex *lock)
58376 diff -urNp linux-2.6.39.4/kernel/padata.c linux-2.6.39.4/kernel/padata.c
58377 --- linux-2.6.39.4/kernel/padata.c 2011-05-19 00:06:34.000000000 -0400
58378 +++ linux-2.6.39.4/kernel/padata.c 2011-08-05 19:44:37.000000000 -0400
58379 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58380 padata->pd = pd;
58381 padata->cb_cpu = cb_cpu;
58382
58383 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58384 - atomic_set(&pd->seq_nr, -1);
58385 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58386 + atomic_set_unchecked(&pd->seq_nr, -1);
58387
58388 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58389 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58390
58391 target_cpu = padata_cpu_hash(padata);
58392 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58393 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58394 padata_init_pqueues(pd);
58395 padata_init_squeues(pd);
58396 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58397 - atomic_set(&pd->seq_nr, -1);
58398 + atomic_set_unchecked(&pd->seq_nr, -1);
58399 atomic_set(&pd->reorder_objects, 0);
58400 atomic_set(&pd->refcnt, 0);
58401 pd->pinst = pinst;
58402 diff -urNp linux-2.6.39.4/kernel/panic.c linux-2.6.39.4/kernel/panic.c
58403 --- linux-2.6.39.4/kernel/panic.c 2011-05-19 00:06:34.000000000 -0400
58404 +++ linux-2.6.39.4/kernel/panic.c 2011-08-05 19:44:37.000000000 -0400
58405 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58406 const char *board;
58407
58408 printk(KERN_WARNING "------------[ cut here ]------------\n");
58409 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58410 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58411 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58412 if (board)
58413 printk(KERN_WARNING "Hardware name: %s\n", board);
58414 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58415 */
58416 void __stack_chk_fail(void)
58417 {
58418 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58419 + dump_stack();
58420 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58421 __builtin_return_address(0));
58422 }
58423 EXPORT_SYMBOL(__stack_chk_fail);
58424 diff -urNp linux-2.6.39.4/kernel/perf_event.c linux-2.6.39.4/kernel/perf_event.c
58425 --- linux-2.6.39.4/kernel/perf_event.c 2011-05-19 00:06:34.000000000 -0400
58426 +++ linux-2.6.39.4/kernel/perf_event.c 2011-08-05 20:34:06.000000000 -0400
58427 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
58428 return 0;
58429 }
58430
58431 -static atomic64_t perf_event_id;
58432 +static atomic64_unchecked_t perf_event_id;
58433
58434 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
58435 enum event_type_t event_type);
58436 @@ -2496,7 +2496,7 @@ static void __perf_event_read(void *info
58437
58438 static inline u64 perf_event_count(struct perf_event *event)
58439 {
58440 - return local64_read(&event->count) + atomic64_read(&event->child_count);
58441 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
58442 }
58443
58444 static u64 perf_event_read(struct perf_event *event)
58445 @@ -3031,9 +3031,9 @@ u64 perf_event_read_value(struct perf_ev
58446 mutex_lock(&event->child_mutex);
58447 total += perf_event_read(event);
58448 *enabled += event->total_time_enabled +
58449 - atomic64_read(&event->child_total_time_enabled);
58450 + atomic64_read_unchecked(&event->child_total_time_enabled);
58451 *running += event->total_time_running +
58452 - atomic64_read(&event->child_total_time_running);
58453 + atomic64_read_unchecked(&event->child_total_time_running);
58454
58455 list_for_each_entry(child, &event->child_list, child_list) {
58456 total += perf_event_read(child);
58457 @@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct p
58458 userpg->offset -= local64_read(&event->hw.prev_count);
58459
58460 userpg->time_enabled = event->total_time_enabled +
58461 - atomic64_read(&event->child_total_time_enabled);
58462 + atomic64_read_unchecked(&event->child_total_time_enabled);
58463
58464 userpg->time_running = event->total_time_running +
58465 - atomic64_read(&event->child_total_time_running);
58466 + atomic64_read_unchecked(&event->child_total_time_running);
58467
58468 barrier();
58469 ++userpg->lock;
58470 @@ -4196,11 +4196,11 @@ static void perf_output_read_one(struct
58471 values[n++] = perf_event_count(event);
58472 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
58473 values[n++] = enabled +
58474 - atomic64_read(&event->child_total_time_enabled);
58475 + atomic64_read_unchecked(&event->child_total_time_enabled);
58476 }
58477 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
58478 values[n++] = running +
58479 - atomic64_read(&event->child_total_time_running);
58480 + atomic64_read_unchecked(&event->child_total_time_running);
58481 }
58482 if (read_format & PERF_FORMAT_ID)
58483 values[n++] = primary_event_id(event);
58484 @@ -6201,7 +6201,7 @@ perf_event_alloc(struct perf_event_attr
58485 event->parent = parent_event;
58486
58487 event->ns = get_pid_ns(current->nsproxy->pid_ns);
58488 - event->id = atomic64_inc_return(&perf_event_id);
58489 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
58490
58491 event->state = PERF_EVENT_STATE_INACTIVE;
58492
58493 @@ -6724,10 +6724,10 @@ static void sync_child_event(struct perf
58494 /*
58495 * Add back the child's count to the parent's count:
58496 */
58497 - atomic64_add(child_val, &parent_event->child_count);
58498 - atomic64_add(child_event->total_time_enabled,
58499 + atomic64_add_unchecked(child_val, &parent_event->child_count);
58500 + atomic64_add_unchecked(child_event->total_time_enabled,
58501 &parent_event->child_total_time_enabled);
58502 - atomic64_add(child_event->total_time_running,
58503 + atomic64_add_unchecked(child_event->total_time_running,
58504 &parent_event->child_total_time_running);
58505
58506 /*
58507 diff -urNp linux-2.6.39.4/kernel/pid.c linux-2.6.39.4/kernel/pid.c
58508 --- linux-2.6.39.4/kernel/pid.c 2011-05-19 00:06:34.000000000 -0400
58509 +++ linux-2.6.39.4/kernel/pid.c 2011-08-05 19:44:37.000000000 -0400
58510 @@ -33,6 +33,7 @@
58511 #include <linux/rculist.h>
58512 #include <linux/bootmem.h>
58513 #include <linux/hash.h>
58514 +#include <linux/security.h>
58515 #include <linux/pid_namespace.h>
58516 #include <linux/init_task.h>
58517 #include <linux/syscalls.h>
58518 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58519
58520 int pid_max = PID_MAX_DEFAULT;
58521
58522 -#define RESERVED_PIDS 300
58523 +#define RESERVED_PIDS 500
58524
58525 int pid_max_min = RESERVED_PIDS + 1;
58526 int pid_max_max = PID_MAX_LIMIT;
58527 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58528 */
58529 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58530 {
58531 + struct task_struct *task;
58532 +
58533 rcu_lockdep_assert(rcu_read_lock_held());
58534 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58535 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58536 +
58537 + if (gr_pid_is_chrooted(task))
58538 + return NULL;
58539 +
58540 + return task;
58541 }
58542
58543 struct task_struct *find_task_by_vpid(pid_t vnr)
58544 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58545 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58546 }
58547
58548 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58549 +{
58550 + rcu_lockdep_assert(rcu_read_lock_held());
58551 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58552 +}
58553 +
58554 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58555 {
58556 struct pid *pid;
58557 diff -urNp linux-2.6.39.4/kernel/posix-cpu-timers.c linux-2.6.39.4/kernel/posix-cpu-timers.c
58558 --- linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-05-19 00:06:34.000000000 -0400
58559 +++ linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-08-06 09:34:48.000000000 -0400
58560 @@ -6,6 +6,7 @@
58561 #include <linux/posix-timers.h>
58562 #include <linux/errno.h>
58563 #include <linux/math64.h>
58564 +#include <linux/security.h>
58565 #include <asm/uaccess.h>
58566 #include <linux/kernel_stat.h>
58567 #include <trace/events/timer.h>
58568 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58569
58570 static __init int init_posix_cpu_timers(void)
58571 {
58572 - struct k_clock process = {
58573 + static struct k_clock process = {
58574 .clock_getres = process_cpu_clock_getres,
58575 .clock_get = process_cpu_clock_get,
58576 .timer_create = process_cpu_timer_create,
58577 .nsleep = process_cpu_nsleep,
58578 .nsleep_restart = process_cpu_nsleep_restart,
58579 };
58580 - struct k_clock thread = {
58581 + static struct k_clock thread = {
58582 .clock_getres = thread_cpu_clock_getres,
58583 .clock_get = thread_cpu_clock_get,
58584 .timer_create = thread_cpu_timer_create,
58585 diff -urNp linux-2.6.39.4/kernel/posix-timers.c linux-2.6.39.4/kernel/posix-timers.c
58586 --- linux-2.6.39.4/kernel/posix-timers.c 2011-05-19 00:06:34.000000000 -0400
58587 +++ linux-2.6.39.4/kernel/posix-timers.c 2011-08-06 09:30:46.000000000 -0400
58588 @@ -43,6 +43,7 @@
58589 #include <linux/idr.h>
58590 #include <linux/posix-clock.h>
58591 #include <linux/posix-timers.h>
58592 +#include <linux/grsecurity.h>
58593 #include <linux/syscalls.h>
58594 #include <linux/wait.h>
58595 #include <linux/workqueue.h>
58596 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58597 * which we beg off on and pass to do_sys_settimeofday().
58598 */
58599
58600 -static struct k_clock posix_clocks[MAX_CLOCKS];
58601 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58602
58603 /*
58604 * These ones are defined below.
58605 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58606 */
58607 static __init int init_posix_timers(void)
58608 {
58609 - struct k_clock clock_realtime = {
58610 + static struct k_clock clock_realtime = {
58611 .clock_getres = hrtimer_get_res,
58612 .clock_get = posix_clock_realtime_get,
58613 .clock_set = posix_clock_realtime_set,
58614 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58615 .timer_get = common_timer_get,
58616 .timer_del = common_timer_del,
58617 };
58618 - struct k_clock clock_monotonic = {
58619 + static struct k_clock clock_monotonic = {
58620 .clock_getres = hrtimer_get_res,
58621 .clock_get = posix_ktime_get_ts,
58622 .nsleep = common_nsleep,
58623 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58624 .timer_get = common_timer_get,
58625 .timer_del = common_timer_del,
58626 };
58627 - struct k_clock clock_monotonic_raw = {
58628 + static struct k_clock clock_monotonic_raw = {
58629 .clock_getres = hrtimer_get_res,
58630 .clock_get = posix_get_monotonic_raw,
58631 };
58632 - struct k_clock clock_realtime_coarse = {
58633 + static struct k_clock clock_realtime_coarse = {
58634 .clock_getres = posix_get_coarse_res,
58635 .clock_get = posix_get_realtime_coarse,
58636 };
58637 - struct k_clock clock_monotonic_coarse = {
58638 + static struct k_clock clock_monotonic_coarse = {
58639 .clock_getres = posix_get_coarse_res,
58640 .clock_get = posix_get_monotonic_coarse,
58641 };
58642 - struct k_clock clock_boottime = {
58643 + static struct k_clock clock_boottime = {
58644 .clock_getres = hrtimer_get_res,
58645 .clock_get = posix_get_boottime,
58646 .nsleep = common_nsleep,
58647 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58648 .timer_del = common_timer_del,
58649 };
58650
58651 + pax_track_stack();
58652 +
58653 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58654 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58655 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58656 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58657 return;
58658 }
58659
58660 - posix_clocks[clock_id] = *new_clock;
58661 + posix_clocks[clock_id] = new_clock;
58662 }
58663 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58664
58665 @@ -512,9 +515,9 @@ static struct k_clock *clockid_to_kclock
58666 return (id & CLOCKFD_MASK) == CLOCKFD ?
58667 &clock_posix_dynamic : &clock_posix_cpu;
58668
58669 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58670 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58671 return NULL;
58672 - return &posix_clocks[id];
58673 + return posix_clocks[id];
58674 }
58675
58676 static int common_timer_create(struct k_itimer *new_timer)
58677 @@ -956,6 +959,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58678 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58679 return -EFAULT;
58680
58681 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58682 + have their clock_set fptr set to a nosettime dummy function
58683 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58684 + call common_clock_set, which calls do_sys_settimeofday, which
58685 + we hook
58686 + */
58687 +
58688 return kc->clock_set(which_clock, &new_tp);
58689 }
58690
58691 diff -urNp linux-2.6.39.4/kernel/power/poweroff.c linux-2.6.39.4/kernel/power/poweroff.c
58692 --- linux-2.6.39.4/kernel/power/poweroff.c 2011-05-19 00:06:34.000000000 -0400
58693 +++ linux-2.6.39.4/kernel/power/poweroff.c 2011-08-05 19:44:37.000000000 -0400
58694 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58695 .enable_mask = SYSRQ_ENABLE_BOOT,
58696 };
58697
58698 -static int pm_sysrq_init(void)
58699 +static int __init pm_sysrq_init(void)
58700 {
58701 register_sysrq_key('o', &sysrq_poweroff_op);
58702 return 0;
58703 diff -urNp linux-2.6.39.4/kernel/power/process.c linux-2.6.39.4/kernel/power/process.c
58704 --- linux-2.6.39.4/kernel/power/process.c 2011-05-19 00:06:34.000000000 -0400
58705 +++ linux-2.6.39.4/kernel/power/process.c 2011-08-05 19:44:37.000000000 -0400
58706 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58707 u64 elapsed_csecs64;
58708 unsigned int elapsed_csecs;
58709 bool wakeup = false;
58710 + bool timedout = false;
58711
58712 do_gettimeofday(&start);
58713
58714 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58715
58716 while (true) {
58717 todo = 0;
58718 + if (time_after(jiffies, end_time))
58719 + timedout = true;
58720 read_lock(&tasklist_lock);
58721 do_each_thread(g, p) {
58722 if (frozen(p) || !freezable(p))
58723 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58724 * try_to_stop() after schedule() in ptrace/signal
58725 * stop sees TIF_FREEZE.
58726 */
58727 - if (!task_is_stopped_or_traced(p) &&
58728 - !freezer_should_skip(p))
58729 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58730 todo++;
58731 + if (timedout) {
58732 + printk(KERN_ERR "Task refusing to freeze:\n");
58733 + sched_show_task(p);
58734 + }
58735 + }
58736 } while_each_thread(g, p);
58737 read_unlock(&tasklist_lock);
58738
58739 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58740 todo += wq_busy;
58741 }
58742
58743 - if (!todo || time_after(jiffies, end_time))
58744 + if (!todo || timedout)
58745 break;
58746
58747 if (pm_wakeup_pending()) {
58748 diff -urNp linux-2.6.39.4/kernel/printk.c linux-2.6.39.4/kernel/printk.c
58749 --- linux-2.6.39.4/kernel/printk.c 2011-05-19 00:06:34.000000000 -0400
58750 +++ linux-2.6.39.4/kernel/printk.c 2011-08-05 19:44:37.000000000 -0400
58751 @@ -284,12 +284,17 @@ static int check_syslog_permissions(int
58752 if (from_file && type != SYSLOG_ACTION_OPEN)
58753 return 0;
58754
58755 +#ifdef CONFIG_GRKERNSEC_DMESG
58756 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58757 + return -EPERM;
58758 +#endif
58759 +
58760 if (syslog_action_restricted(type)) {
58761 if (capable(CAP_SYSLOG))
58762 return 0;
58763 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58764 if (capable(CAP_SYS_ADMIN)) {
58765 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58766 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58767 "but no CAP_SYSLOG (deprecated).\n");
58768 return 0;
58769 }
58770 diff -urNp linux-2.6.39.4/kernel/profile.c linux-2.6.39.4/kernel/profile.c
58771 --- linux-2.6.39.4/kernel/profile.c 2011-05-19 00:06:34.000000000 -0400
58772 +++ linux-2.6.39.4/kernel/profile.c 2011-08-05 19:44:37.000000000 -0400
58773 @@ -39,7 +39,7 @@ struct profile_hit {
58774 /* Oprofile timer tick hook */
58775 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58776
58777 -static atomic_t *prof_buffer;
58778 +static atomic_unchecked_t *prof_buffer;
58779 static unsigned long prof_len, prof_shift;
58780
58781 int prof_on __read_mostly;
58782 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
58783 hits[i].pc = 0;
58784 continue;
58785 }
58786 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58787 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58788 hits[i].hits = hits[i].pc = 0;
58789 }
58790 }
58791 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
58792 * Add the current hit(s) and flush the write-queue out
58793 * to the global buffer:
58794 */
58795 - atomic_add(nr_hits, &prof_buffer[pc]);
58796 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58797 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58798 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58799 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58800 hits[i].pc = hits[i].hits = 0;
58801 }
58802 out:
58803 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
58804 if (prof_on != type || !prof_buffer)
58805 return;
58806 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58807 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58808 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58809 }
58810 #endif /* !CONFIG_SMP */
58811 EXPORT_SYMBOL_GPL(profile_hits);
58812 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58813 return -EFAULT;
58814 buf++; p++; count--; read++;
58815 }
58816 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58817 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58818 if (copy_to_user(buf, (void *)pnt, count))
58819 return -EFAULT;
58820 read += count;
58821 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58822 }
58823 #endif
58824 profile_discard_flip_buffers();
58825 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58826 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58827 return count;
58828 }
58829
58830 diff -urNp linux-2.6.39.4/kernel/ptrace.c linux-2.6.39.4/kernel/ptrace.c
58831 --- linux-2.6.39.4/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
58832 +++ linux-2.6.39.4/kernel/ptrace.c 2011-08-05 19:44:37.000000000 -0400
58833 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
58834 return ret;
58835 }
58836
58837 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58838 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58839 + unsigned int log)
58840 {
58841 const struct cred *cred = current_cred(), *tcred;
58842
58843 @@ -143,7 +144,8 @@ int __ptrace_may_access(struct task_stru
58844 cred->gid == tcred->sgid &&
58845 cred->gid == tcred->gid))
58846 goto ok;
58847 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58848 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58849 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58850 goto ok;
58851 rcu_read_unlock();
58852 return -EPERM;
58853 @@ -152,7 +154,9 @@ ok:
58854 smp_rmb();
58855 if (task->mm)
58856 dumpable = get_dumpable(task->mm);
58857 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58858 + if (!dumpable &&
58859 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58860 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58861 return -EPERM;
58862
58863 return security_ptrace_access_check(task, mode);
58864 @@ -162,7 +166,16 @@ bool ptrace_may_access(struct task_struc
58865 {
58866 int err;
58867 task_lock(task);
58868 - err = __ptrace_may_access(task, mode);
58869 + err = __ptrace_may_access(task, mode, 0);
58870 + task_unlock(task);
58871 + return !err;
58872 +}
58873 +
58874 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58875 +{
58876 + int err;
58877 + task_lock(task);
58878 + err = __ptrace_may_access(task, mode, 1);
58879 task_unlock(task);
58880 return !err;
58881 }
58882 @@ -189,7 +202,7 @@ static int ptrace_attach(struct task_str
58883 goto out;
58884
58885 task_lock(task);
58886 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58887 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58888 task_unlock(task);
58889 if (retval)
58890 goto unlock_creds;
58891 @@ -202,7 +215,7 @@ static int ptrace_attach(struct task_str
58892 goto unlock_tasklist;
58893
58894 task->ptrace = PT_PTRACED;
58895 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58896 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58897 task->ptrace |= PT_PTRACE_CAP;
58898
58899 __ptrace_link(task, current);
58900 @@ -362,6 +375,8 @@ int ptrace_readdata(struct task_struct *
58901 {
58902 int copied = 0;
58903
58904 + pax_track_stack();
58905 +
58906 while (len > 0) {
58907 char buf[128];
58908 int this_len, retval;
58909 @@ -373,7 +388,7 @@ int ptrace_readdata(struct task_struct *
58910 break;
58911 return -EIO;
58912 }
58913 - if (copy_to_user(dst, buf, retval))
58914 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
58915 return -EFAULT;
58916 copied += retval;
58917 src += retval;
58918 @@ -387,6 +402,8 @@ int ptrace_writedata(struct task_struct
58919 {
58920 int copied = 0;
58921
58922 + pax_track_stack();
58923 +
58924 while (len > 0) {
58925 char buf[128];
58926 int this_len, retval;
58927 @@ -569,9 +586,11 @@ int ptrace_request(struct task_struct *c
58928 {
58929 int ret = -EIO;
58930 siginfo_t siginfo;
58931 - void __user *datavp = (void __user *) data;
58932 + void __user *datavp = (__force void __user *) data;
58933 unsigned long __user *datalp = datavp;
58934
58935 + pax_track_stack();
58936 +
58937 switch (request) {
58938 case PTRACE_PEEKTEXT:
58939 case PTRACE_PEEKDATA:
58940 @@ -717,14 +736,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
58941 goto out;
58942 }
58943
58944 + if (gr_handle_ptrace(child, request)) {
58945 + ret = -EPERM;
58946 + goto out_put_task_struct;
58947 + }
58948 +
58949 if (request == PTRACE_ATTACH) {
58950 ret = ptrace_attach(child);
58951 /*
58952 * Some architectures need to do book-keeping after
58953 * a ptrace attach.
58954 */
58955 - if (!ret)
58956 + if (!ret) {
58957 arch_ptrace_attach(child);
58958 + gr_audit_ptrace(child);
58959 + }
58960 goto out_put_task_struct;
58961 }
58962
58963 @@ -749,7 +775,7 @@ int generic_ptrace_peekdata(struct task_
58964 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
58965 if (copied != sizeof(tmp))
58966 return -EIO;
58967 - return put_user(tmp, (unsigned long __user *)data);
58968 + return put_user(tmp, (__force unsigned long __user *)data);
58969 }
58970
58971 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
58972 @@ -772,6 +798,8 @@ int compat_ptrace_request(struct task_st
58973 siginfo_t siginfo;
58974 int ret;
58975
58976 + pax_track_stack();
58977 +
58978 switch (request) {
58979 case PTRACE_PEEKTEXT:
58980 case PTRACE_PEEKDATA:
58981 @@ -859,14 +887,21 @@ asmlinkage long compat_sys_ptrace(compat
58982 goto out;
58983 }
58984
58985 + if (gr_handle_ptrace(child, request)) {
58986 + ret = -EPERM;
58987 + goto out_put_task_struct;
58988 + }
58989 +
58990 if (request == PTRACE_ATTACH) {
58991 ret = ptrace_attach(child);
58992 /*
58993 * Some architectures need to do book-keeping after
58994 * a ptrace attach.
58995 */
58996 - if (!ret)
58997 + if (!ret) {
58998 arch_ptrace_attach(child);
58999 + gr_audit_ptrace(child);
59000 + }
59001 goto out_put_task_struct;
59002 }
59003
59004 diff -urNp linux-2.6.39.4/kernel/rcutorture.c linux-2.6.39.4/kernel/rcutorture.c
59005 --- linux-2.6.39.4/kernel/rcutorture.c 2011-05-19 00:06:34.000000000 -0400
59006 +++ linux-2.6.39.4/kernel/rcutorture.c 2011-08-05 19:44:37.000000000 -0400
59007 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
59008 { 0 };
59009 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
59010 { 0 };
59011 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59012 -static atomic_t n_rcu_torture_alloc;
59013 -static atomic_t n_rcu_torture_alloc_fail;
59014 -static atomic_t n_rcu_torture_free;
59015 -static atomic_t n_rcu_torture_mberror;
59016 -static atomic_t n_rcu_torture_error;
59017 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59018 +static atomic_unchecked_t n_rcu_torture_alloc;
59019 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
59020 +static atomic_unchecked_t n_rcu_torture_free;
59021 +static atomic_unchecked_t n_rcu_torture_mberror;
59022 +static atomic_unchecked_t n_rcu_torture_error;
59023 static long n_rcu_torture_boost_ktrerror;
59024 static long n_rcu_torture_boost_rterror;
59025 static long n_rcu_torture_boost_allocerror;
59026 @@ -225,11 +225,11 @@ rcu_torture_alloc(void)
59027
59028 spin_lock_bh(&rcu_torture_lock);
59029 if (list_empty(&rcu_torture_freelist)) {
59030 - atomic_inc(&n_rcu_torture_alloc_fail);
59031 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
59032 spin_unlock_bh(&rcu_torture_lock);
59033 return NULL;
59034 }
59035 - atomic_inc(&n_rcu_torture_alloc);
59036 + atomic_inc_unchecked(&n_rcu_torture_alloc);
59037 p = rcu_torture_freelist.next;
59038 list_del_init(p);
59039 spin_unlock_bh(&rcu_torture_lock);
59040 @@ -242,7 +242,7 @@ rcu_torture_alloc(void)
59041 static void
59042 rcu_torture_free(struct rcu_torture *p)
59043 {
59044 - atomic_inc(&n_rcu_torture_free);
59045 + atomic_inc_unchecked(&n_rcu_torture_free);
59046 spin_lock_bh(&rcu_torture_lock);
59047 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
59048 spin_unlock_bh(&rcu_torture_lock);
59049 @@ -362,7 +362,7 @@ rcu_torture_cb(struct rcu_head *p)
59050 i = rp->rtort_pipe_count;
59051 if (i > RCU_TORTURE_PIPE_LEN)
59052 i = RCU_TORTURE_PIPE_LEN;
59053 - atomic_inc(&rcu_torture_wcount[i]);
59054 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59055 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59056 rp->rtort_mbtest = 0;
59057 rcu_torture_free(rp);
59058 @@ -409,7 +409,7 @@ static void rcu_sync_torture_deferred_fr
59059 i = rp->rtort_pipe_count;
59060 if (i > RCU_TORTURE_PIPE_LEN)
59061 i = RCU_TORTURE_PIPE_LEN;
59062 - atomic_inc(&rcu_torture_wcount[i]);
59063 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59064 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59065 rp->rtort_mbtest = 0;
59066 list_del(&rp->rtort_free);
59067 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
59068 i = old_rp->rtort_pipe_count;
59069 if (i > RCU_TORTURE_PIPE_LEN)
59070 i = RCU_TORTURE_PIPE_LEN;
59071 - atomic_inc(&rcu_torture_wcount[i]);
59072 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59073 old_rp->rtort_pipe_count++;
59074 cur_ops->deferred_free(old_rp);
59075 }
59076 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
59077 return;
59078 }
59079 if (p->rtort_mbtest == 0)
59080 - atomic_inc(&n_rcu_torture_mberror);
59081 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59082 spin_lock(&rand_lock);
59083 cur_ops->read_delay(&rand);
59084 n_rcu_torture_timers++;
59085 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
59086 continue;
59087 }
59088 if (p->rtort_mbtest == 0)
59089 - atomic_inc(&n_rcu_torture_mberror);
59090 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59091 cur_ops->read_delay(&rand);
59092 preempt_disable();
59093 pipe_count = p->rtort_pipe_count;
59094 @@ -1072,10 +1072,10 @@ rcu_torture_printk(char *page)
59095 rcu_torture_current,
59096 rcu_torture_current_version,
59097 list_empty(&rcu_torture_freelist),
59098 - atomic_read(&n_rcu_torture_alloc),
59099 - atomic_read(&n_rcu_torture_alloc_fail),
59100 - atomic_read(&n_rcu_torture_free),
59101 - atomic_read(&n_rcu_torture_mberror),
59102 + atomic_read_unchecked(&n_rcu_torture_alloc),
59103 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
59104 + atomic_read_unchecked(&n_rcu_torture_free),
59105 + atomic_read_unchecked(&n_rcu_torture_mberror),
59106 n_rcu_torture_boost_ktrerror,
59107 n_rcu_torture_boost_rterror,
59108 n_rcu_torture_boost_allocerror,
59109 @@ -1083,7 +1083,7 @@ rcu_torture_printk(char *page)
59110 n_rcu_torture_boost_failure,
59111 n_rcu_torture_boosts,
59112 n_rcu_torture_timers);
59113 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
59114 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
59115 n_rcu_torture_boost_ktrerror != 0 ||
59116 n_rcu_torture_boost_rterror != 0 ||
59117 n_rcu_torture_boost_allocerror != 0 ||
59118 @@ -1093,7 +1093,7 @@ rcu_torture_printk(char *page)
59119 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
59120 if (i > 1) {
59121 cnt += sprintf(&page[cnt], "!!! ");
59122 - atomic_inc(&n_rcu_torture_error);
59123 + atomic_inc_unchecked(&n_rcu_torture_error);
59124 WARN_ON_ONCE(1);
59125 }
59126 cnt += sprintf(&page[cnt], "Reader Pipe: ");
59127 @@ -1107,7 +1107,7 @@ rcu_torture_printk(char *page)
59128 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
59129 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59130 cnt += sprintf(&page[cnt], " %d",
59131 - atomic_read(&rcu_torture_wcount[i]));
59132 + atomic_read_unchecked(&rcu_torture_wcount[i]));
59133 }
59134 cnt += sprintf(&page[cnt], "\n");
59135 if (cur_ops->stats)
59136 @@ -1415,7 +1415,7 @@ rcu_torture_cleanup(void)
59137
59138 if (cur_ops->cleanup)
59139 cur_ops->cleanup();
59140 - if (atomic_read(&n_rcu_torture_error))
59141 + if (atomic_read_unchecked(&n_rcu_torture_error))
59142 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
59143 else
59144 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
59145 @@ -1479,11 +1479,11 @@ rcu_torture_init(void)
59146
59147 rcu_torture_current = NULL;
59148 rcu_torture_current_version = 0;
59149 - atomic_set(&n_rcu_torture_alloc, 0);
59150 - atomic_set(&n_rcu_torture_alloc_fail, 0);
59151 - atomic_set(&n_rcu_torture_free, 0);
59152 - atomic_set(&n_rcu_torture_mberror, 0);
59153 - atomic_set(&n_rcu_torture_error, 0);
59154 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
59155 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
59156 + atomic_set_unchecked(&n_rcu_torture_free, 0);
59157 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
59158 + atomic_set_unchecked(&n_rcu_torture_error, 0);
59159 n_rcu_torture_boost_ktrerror = 0;
59160 n_rcu_torture_boost_rterror = 0;
59161 n_rcu_torture_boost_allocerror = 0;
59162 @@ -1491,7 +1491,7 @@ rcu_torture_init(void)
59163 n_rcu_torture_boost_failure = 0;
59164 n_rcu_torture_boosts = 0;
59165 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
59166 - atomic_set(&rcu_torture_wcount[i], 0);
59167 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
59168 for_each_possible_cpu(cpu) {
59169 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59170 per_cpu(rcu_torture_count, cpu)[i] = 0;
59171 diff -urNp linux-2.6.39.4/kernel/rcutree.c linux-2.6.39.4/kernel/rcutree.c
59172 --- linux-2.6.39.4/kernel/rcutree.c 2011-05-19 00:06:34.000000000 -0400
59173 +++ linux-2.6.39.4/kernel/rcutree.c 2011-08-05 19:44:37.000000000 -0400
59174 @@ -1389,7 +1389,7 @@ __rcu_process_callbacks(struct rcu_state
59175 /*
59176 * Do softirq processing for the current CPU.
59177 */
59178 -static void rcu_process_callbacks(struct softirq_action *unused)
59179 +static void rcu_process_callbacks(void)
59180 {
59181 /*
59182 * Memory references from any prior RCU read-side critical sections
59183 diff -urNp linux-2.6.39.4/kernel/rcutree_plugin.h linux-2.6.39.4/kernel/rcutree_plugin.h
59184 --- linux-2.6.39.4/kernel/rcutree_plugin.h 2011-05-19 00:06:34.000000000 -0400
59185 +++ linux-2.6.39.4/kernel/rcutree_plugin.h 2011-08-05 19:44:37.000000000 -0400
59186 @@ -730,7 +730,7 @@ void synchronize_rcu_expedited(void)
59187
59188 /* Clean up and exit. */
59189 smp_mb(); /* ensure expedited GP seen before counter increment. */
59190 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
59191 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
59192 unlock_mb_ret:
59193 mutex_unlock(&sync_rcu_preempt_exp_mutex);
59194 mb_ret:
59195 @@ -1025,8 +1025,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
59196
59197 #else /* #ifndef CONFIG_SMP */
59198
59199 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
59200 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
59201 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
59202 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
59203
59204 static int synchronize_sched_expedited_cpu_stop(void *data)
59205 {
59206 @@ -1081,7 +1081,7 @@ void synchronize_sched_expedited(void)
59207 int firstsnap, s, snap, trycount = 0;
59208
59209 /* Note that atomic_inc_return() implies full memory barrier. */
59210 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
59211 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
59212 get_online_cpus();
59213
59214 /*
59215 @@ -1102,7 +1102,7 @@ void synchronize_sched_expedited(void)
59216 }
59217
59218 /* Check to see if someone else did our work for us. */
59219 - s = atomic_read(&sync_sched_expedited_done);
59220 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59221 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
59222 smp_mb(); /* ensure test happens before caller kfree */
59223 return;
59224 @@ -1117,7 +1117,7 @@ void synchronize_sched_expedited(void)
59225 * grace period works for us.
59226 */
59227 get_online_cpus();
59228 - snap = atomic_read(&sync_sched_expedited_started) - 1;
59229 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
59230 smp_mb(); /* ensure read is before try_stop_cpus(). */
59231 }
59232
59233 @@ -1128,12 +1128,12 @@ void synchronize_sched_expedited(void)
59234 * than we did beat us to the punch.
59235 */
59236 do {
59237 - s = atomic_read(&sync_sched_expedited_done);
59238 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59239 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59240 smp_mb(); /* ensure test happens before caller kfree */
59241 break;
59242 }
59243 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59244 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59245
59246 put_online_cpus();
59247 }
59248 diff -urNp linux-2.6.39.4/kernel/relay.c linux-2.6.39.4/kernel/relay.c
59249 --- linux-2.6.39.4/kernel/relay.c 2011-05-19 00:06:34.000000000 -0400
59250 +++ linux-2.6.39.4/kernel/relay.c 2011-08-05 19:44:37.000000000 -0400
59251 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59252 };
59253 ssize_t ret;
59254
59255 + pax_track_stack();
59256 +
59257 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59258 return 0;
59259 if (splice_grow_spd(pipe, &spd))
59260 diff -urNp linux-2.6.39.4/kernel/resource.c linux-2.6.39.4/kernel/resource.c
59261 --- linux-2.6.39.4/kernel/resource.c 2011-05-19 00:06:34.000000000 -0400
59262 +++ linux-2.6.39.4/kernel/resource.c 2011-08-05 19:44:37.000000000 -0400
59263 @@ -133,8 +133,18 @@ static const struct file_operations proc
59264
59265 static int __init ioresources_init(void)
59266 {
59267 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59268 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59269 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59270 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59271 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59272 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59273 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59274 +#endif
59275 +#else
59276 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59277 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59278 +#endif
59279 return 0;
59280 }
59281 __initcall(ioresources_init);
59282 diff -urNp linux-2.6.39.4/kernel/rtmutex-tester.c linux-2.6.39.4/kernel/rtmutex-tester.c
59283 --- linux-2.6.39.4/kernel/rtmutex-tester.c 2011-05-19 00:06:34.000000000 -0400
59284 +++ linux-2.6.39.4/kernel/rtmutex-tester.c 2011-08-05 19:44:37.000000000 -0400
59285 @@ -20,7 +20,7 @@
59286 #define MAX_RT_TEST_MUTEXES 8
59287
59288 static spinlock_t rttest_lock;
59289 -static atomic_t rttest_event;
59290 +static atomic_unchecked_t rttest_event;
59291
59292 struct test_thread_data {
59293 int opcode;
59294 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59295
59296 case RTTEST_LOCKCONT:
59297 td->mutexes[td->opdata] = 1;
59298 - td->event = atomic_add_return(1, &rttest_event);
59299 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59300 return 0;
59301
59302 case RTTEST_RESET:
59303 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59304 return 0;
59305
59306 case RTTEST_RESETEVENT:
59307 - atomic_set(&rttest_event, 0);
59308 + atomic_set_unchecked(&rttest_event, 0);
59309 return 0;
59310
59311 default:
59312 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59313 return ret;
59314
59315 td->mutexes[id] = 1;
59316 - td->event = atomic_add_return(1, &rttest_event);
59317 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59318 rt_mutex_lock(&mutexes[id]);
59319 - td->event = atomic_add_return(1, &rttest_event);
59320 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59321 td->mutexes[id] = 4;
59322 return 0;
59323
59324 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59325 return ret;
59326
59327 td->mutexes[id] = 1;
59328 - td->event = atomic_add_return(1, &rttest_event);
59329 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59330 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59331 - td->event = atomic_add_return(1, &rttest_event);
59332 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59333 td->mutexes[id] = ret ? 0 : 4;
59334 return ret ? -EINTR : 0;
59335
59336 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59337 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59338 return ret;
59339
59340 - td->event = atomic_add_return(1, &rttest_event);
59341 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59342 rt_mutex_unlock(&mutexes[id]);
59343 - td->event = atomic_add_return(1, &rttest_event);
59344 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59345 td->mutexes[id] = 0;
59346 return 0;
59347
59348 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59349 break;
59350
59351 td->mutexes[dat] = 2;
59352 - td->event = atomic_add_return(1, &rttest_event);
59353 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59354 break;
59355
59356 default:
59357 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59358 return;
59359
59360 td->mutexes[dat] = 3;
59361 - td->event = atomic_add_return(1, &rttest_event);
59362 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59363 break;
59364
59365 case RTTEST_LOCKNOWAIT:
59366 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59367 return;
59368
59369 td->mutexes[dat] = 1;
59370 - td->event = atomic_add_return(1, &rttest_event);
59371 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59372 return;
59373
59374 default:
59375 diff -urNp linux-2.6.39.4/kernel/sched_autogroup.c linux-2.6.39.4/kernel/sched_autogroup.c
59376 --- linux-2.6.39.4/kernel/sched_autogroup.c 2011-05-19 00:06:34.000000000 -0400
59377 +++ linux-2.6.39.4/kernel/sched_autogroup.c 2011-08-05 19:44:37.000000000 -0400
59378 @@ -7,7 +7,7 @@
59379
59380 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59381 static struct autogroup autogroup_default;
59382 -static atomic_t autogroup_seq_nr;
59383 +static atomic_unchecked_t autogroup_seq_nr;
59384
59385 static void __init autogroup_init(struct task_struct *init_task)
59386 {
59387 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59388
59389 kref_init(&ag->kref);
59390 init_rwsem(&ag->lock);
59391 - ag->id = atomic_inc_return(&autogroup_seq_nr);
59392 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59393 ag->tg = tg;
59394 #ifdef CONFIG_RT_GROUP_SCHED
59395 /*
59396 diff -urNp linux-2.6.39.4/kernel/sched.c linux-2.6.39.4/kernel/sched.c
59397 --- linux-2.6.39.4/kernel/sched.c 2011-05-19 00:06:34.000000000 -0400
59398 +++ linux-2.6.39.4/kernel/sched.c 2011-08-05 19:44:37.000000000 -0400
59399 @@ -4078,6 +4078,8 @@ asmlinkage void __sched schedule(void)
59400 struct rq *rq;
59401 int cpu;
59402
59403 + pax_track_stack();
59404 +
59405 need_resched:
59406 preempt_disable();
59407 cpu = smp_processor_id();
59408 @@ -4165,7 +4167,7 @@ EXPORT_SYMBOL(schedule);
59409 * Look out! "owner" is an entirely speculative pointer
59410 * access and not reliable.
59411 */
59412 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
59413 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
59414 {
59415 unsigned int cpu;
59416 struct rq *rq;
59417 @@ -4179,10 +4181,10 @@ int mutex_spin_on_owner(struct mutex *lo
59418 * DEBUG_PAGEALLOC could have unmapped it if
59419 * the mutex owner just released it and exited.
59420 */
59421 - if (probe_kernel_address(&owner->cpu, cpu))
59422 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
59423 return 0;
59424 #else
59425 - cpu = owner->cpu;
59426 + cpu = task_thread_info(owner)->cpu;
59427 #endif
59428
59429 /*
59430 @@ -4219,7 +4221,7 @@ int mutex_spin_on_owner(struct mutex *lo
59431 /*
59432 * Is that owner really running on that cpu?
59433 */
59434 - if (task_thread_info(rq->curr) != owner || need_resched())
59435 + if (rq->curr != owner || need_resched())
59436 return 0;
59437
59438 arch_mutex_cpu_relax();
59439 @@ -4778,6 +4780,8 @@ int can_nice(const struct task_struct *p
59440 /* convert nice value [19,-20] to rlimit style value [1,40] */
59441 int nice_rlim = 20 - nice;
59442
59443 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59444 +
59445 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59446 capable(CAP_SYS_NICE));
59447 }
59448 @@ -4811,7 +4815,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59449 if (nice > 19)
59450 nice = 19;
59451
59452 - if (increment < 0 && !can_nice(current, nice))
59453 + if (increment < 0 && (!can_nice(current, nice) ||
59454 + gr_handle_chroot_nice()))
59455 return -EPERM;
59456
59457 retval = security_task_setnice(current, nice);
59458 @@ -4957,6 +4962,7 @@ recheck:
59459 unsigned long rlim_rtprio =
59460 task_rlimit(p, RLIMIT_RTPRIO);
59461
59462 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59463 /* can't set/change the rt policy */
59464 if (policy != p->policy && !rlim_rtprio)
59465 return -EPERM;
59466 @@ -7164,7 +7170,7 @@ static void init_sched_groups_power(int
59467 long power;
59468 int weight;
59469
59470 - WARN_ON(!sd || !sd->groups);
59471 + BUG_ON(!sd || !sd->groups);
59472
59473 if (cpu != group_first_cpu(sd->groups))
59474 return;
59475 diff -urNp linux-2.6.39.4/kernel/sched_fair.c linux-2.6.39.4/kernel/sched_fair.c
59476 --- linux-2.6.39.4/kernel/sched_fair.c 2011-05-19 00:06:34.000000000 -0400
59477 +++ linux-2.6.39.4/kernel/sched_fair.c 2011-08-05 19:44:37.000000000 -0400
59478 @@ -3999,7 +3999,7 @@ static void nohz_idle_balance(int this_c
59479 * run_rebalance_domains is triggered when needed from the scheduler tick.
59480 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59481 */
59482 -static void run_rebalance_domains(struct softirq_action *h)
59483 +static void run_rebalance_domains(void)
59484 {
59485 int this_cpu = smp_processor_id();
59486 struct rq *this_rq = cpu_rq(this_cpu);
59487 diff -urNp linux-2.6.39.4/kernel/signal.c linux-2.6.39.4/kernel/signal.c
59488 --- linux-2.6.39.4/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
59489 +++ linux-2.6.39.4/kernel/signal.c 2011-08-05 19:44:37.000000000 -0400
59490 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59491
59492 int print_fatal_signals __read_mostly;
59493
59494 -static void __user *sig_handler(struct task_struct *t, int sig)
59495 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
59496 {
59497 return t->sighand->action[sig - 1].sa.sa_handler;
59498 }
59499
59500 -static int sig_handler_ignored(void __user *handler, int sig)
59501 +static int sig_handler_ignored(__sighandler_t handler, int sig)
59502 {
59503 /* Is it explicitly or implicitly ignored? */
59504 return handler == SIG_IGN ||
59505 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59506 static int sig_task_ignored(struct task_struct *t, int sig,
59507 int from_ancestor_ns)
59508 {
59509 - void __user *handler;
59510 + __sighandler_t handler;
59511
59512 handler = sig_handler(t, sig);
59513
59514 @@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
59515 atomic_inc(&user->sigpending);
59516 rcu_read_unlock();
59517
59518 + if (!override_rlimit)
59519 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59520 +
59521 if (override_rlimit ||
59522 atomic_read(&user->sigpending) <=
59523 task_rlimit(t, RLIMIT_SIGPENDING)) {
59524 @@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
59525
59526 int unhandled_signal(struct task_struct *tsk, int sig)
59527 {
59528 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59529 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59530 if (is_global_init(tsk))
59531 return 1;
59532 if (handler != SIG_IGN && handler != SIG_DFL)
59533 @@ -693,6 +696,12 @@ static int check_kill_permission(int sig
59534 }
59535 }
59536
59537 + /* allow glibc communication via tgkill to other threads in our
59538 + thread group */
59539 + if ((info->si_code != SI_TKILL || sig != (SIGRTMIN+1) ||
59540 + task_tgid_vnr(t) != info->si_pid) && gr_handle_signal(t, sig))
59541 + return -EPERM;
59542 +
59543 return security_task_kill(t, info, sig, 0);
59544 }
59545
59546 @@ -1041,7 +1050,7 @@ __group_send_sig_info(int sig, struct si
59547 return send_signal(sig, info, p, 1);
59548 }
59549
59550 -static int
59551 +int
59552 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59553 {
59554 return send_signal(sig, info, t, 0);
59555 @@ -1078,6 +1087,7 @@ force_sig_info(int sig, struct siginfo *
59556 unsigned long int flags;
59557 int ret, blocked, ignored;
59558 struct k_sigaction *action;
59559 + int is_unhandled = 0;
59560
59561 spin_lock_irqsave(&t->sighand->siglock, flags);
59562 action = &t->sighand->action[sig-1];
59563 @@ -1092,9 +1102,18 @@ force_sig_info(int sig, struct siginfo *
59564 }
59565 if (action->sa.sa_handler == SIG_DFL)
59566 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59567 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59568 + is_unhandled = 1;
59569 ret = specific_send_sig_info(sig, info, t);
59570 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59571
59572 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59573 + normal operation */
59574 + if (is_unhandled) {
59575 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59576 + gr_handle_crash(t, sig);
59577 + }
59578 +
59579 return ret;
59580 }
59581
59582 @@ -1153,8 +1172,11 @@ int group_send_sig_info(int sig, struct
59583 ret = check_kill_permission(sig, info, p);
59584 rcu_read_unlock();
59585
59586 - if (!ret && sig)
59587 + if (!ret && sig) {
59588 ret = do_send_sig_info(sig, info, p, true);
59589 + if (!ret)
59590 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59591 + }
59592
59593 return ret;
59594 }
59595 @@ -1718,6 +1740,8 @@ void ptrace_notify(int exit_code)
59596 {
59597 siginfo_t info;
59598
59599 + pax_track_stack();
59600 +
59601 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59602
59603 memset(&info, 0, sizeof info);
59604 @@ -2393,7 +2417,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59605 int error = -ESRCH;
59606
59607 rcu_read_lock();
59608 - p = find_task_by_vpid(pid);
59609 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59610 + /* allow glibc communication via tgkill to other threads in our
59611 + thread group */
59612 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59613 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59614 + p = find_task_by_vpid_unrestricted(pid);
59615 + else
59616 +#endif
59617 + p = find_task_by_vpid(pid);
59618 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59619 error = check_kill_permission(sig, info, p);
59620 /*
59621 diff -urNp linux-2.6.39.4/kernel/smp.c linux-2.6.39.4/kernel/smp.c
59622 --- linux-2.6.39.4/kernel/smp.c 2011-05-19 00:06:34.000000000 -0400
59623 +++ linux-2.6.39.4/kernel/smp.c 2011-08-05 19:44:37.000000000 -0400
59624 @@ -583,22 +583,22 @@ int smp_call_function(smp_call_func_t fu
59625 }
59626 EXPORT_SYMBOL(smp_call_function);
59627
59628 -void ipi_call_lock(void)
59629 +void ipi_call_lock(void) __acquires(call_function.lock)
59630 {
59631 raw_spin_lock(&call_function.lock);
59632 }
59633
59634 -void ipi_call_unlock(void)
59635 +void ipi_call_unlock(void) __releases(call_function.lock)
59636 {
59637 raw_spin_unlock(&call_function.lock);
59638 }
59639
59640 -void ipi_call_lock_irq(void)
59641 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59642 {
59643 raw_spin_lock_irq(&call_function.lock);
59644 }
59645
59646 -void ipi_call_unlock_irq(void)
59647 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59648 {
59649 raw_spin_unlock_irq(&call_function.lock);
59650 }
59651 diff -urNp linux-2.6.39.4/kernel/softirq.c linux-2.6.39.4/kernel/softirq.c
59652 --- linux-2.6.39.4/kernel/softirq.c 2011-05-19 00:06:34.000000000 -0400
59653 +++ linux-2.6.39.4/kernel/softirq.c 2011-08-05 20:34:06.000000000 -0400
59654 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59655
59656 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59657
59658 -char *softirq_to_name[NR_SOFTIRQS] = {
59659 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59660 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59661 "TASKLET", "SCHED", "HRTIMER", "RCU"
59662 };
59663 @@ -235,7 +235,7 @@ restart:
59664 kstat_incr_softirqs_this_cpu(vec_nr);
59665
59666 trace_softirq_entry(vec_nr);
59667 - h->action(h);
59668 + h->action();
59669 trace_softirq_exit(vec_nr);
59670 if (unlikely(prev_count != preempt_count())) {
59671 printk(KERN_ERR "huh, entered softirq %u %s %p"
59672 @@ -377,9 +377,11 @@ void raise_softirq(unsigned int nr)
59673 local_irq_restore(flags);
59674 }
59675
59676 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59677 +void open_softirq(int nr, void (*action)(void))
59678 {
59679 - softirq_vec[nr].action = action;
59680 + pax_open_kernel();
59681 + *(void **)&softirq_vec[nr].action = action;
59682 + pax_close_kernel();
59683 }
59684
59685 /*
59686 @@ -433,7 +435,7 @@ void __tasklet_hi_schedule_first(struct
59687
59688 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59689
59690 -static void tasklet_action(struct softirq_action *a)
59691 +static void tasklet_action(void)
59692 {
59693 struct tasklet_struct *list;
59694
59695 @@ -468,7 +470,7 @@ static void tasklet_action(struct softir
59696 }
59697 }
59698
59699 -static void tasklet_hi_action(struct softirq_action *a)
59700 +static void tasklet_hi_action(void)
59701 {
59702 struct tasklet_struct *list;
59703
59704 diff -urNp linux-2.6.39.4/kernel/sys.c linux-2.6.39.4/kernel/sys.c
59705 --- linux-2.6.39.4/kernel/sys.c 2011-05-19 00:06:34.000000000 -0400
59706 +++ linux-2.6.39.4/kernel/sys.c 2011-08-05 19:44:37.000000000 -0400
59707 @@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59708 error = -EACCES;
59709 goto out;
59710 }
59711 +
59712 + if (gr_handle_chroot_setpriority(p, niceval)) {
59713 + error = -EACCES;
59714 + goto out;
59715 + }
59716 +
59717 no_nice = security_task_setnice(p, niceval);
59718 if (no_nice) {
59719 error = no_nice;
59720 @@ -538,6 +544,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59721 goto error;
59722 }
59723
59724 + if (gr_check_group_change(new->gid, new->egid, -1))
59725 + goto error;
59726 +
59727 if (rgid != (gid_t) -1 ||
59728 (egid != (gid_t) -1 && egid != old->gid))
59729 new->sgid = new->egid;
59730 @@ -567,6 +576,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59731 old = current_cred();
59732
59733 retval = -EPERM;
59734 +
59735 + if (gr_check_group_change(gid, gid, gid))
59736 + goto error;
59737 +
59738 if (nsown_capable(CAP_SETGID))
59739 new->gid = new->egid = new->sgid = new->fsgid = gid;
59740 else if (gid == old->gid || gid == old->sgid)
59741 @@ -647,6 +660,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59742 goto error;
59743 }
59744
59745 + if (gr_check_user_change(new->uid, new->euid, -1))
59746 + goto error;
59747 +
59748 if (new->uid != old->uid) {
59749 retval = set_user(new);
59750 if (retval < 0)
59751 @@ -691,6 +707,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59752 old = current_cred();
59753
59754 retval = -EPERM;
59755 +
59756 + if (gr_check_crash_uid(uid))
59757 + goto error;
59758 + if (gr_check_user_change(uid, uid, uid))
59759 + goto error;
59760 +
59761 if (nsown_capable(CAP_SETUID)) {
59762 new->suid = new->uid = uid;
59763 if (uid != old->uid) {
59764 @@ -745,6 +767,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59765 goto error;
59766 }
59767
59768 + if (gr_check_user_change(ruid, euid, -1))
59769 + goto error;
59770 +
59771 if (ruid != (uid_t) -1) {
59772 new->uid = ruid;
59773 if (ruid != old->uid) {
59774 @@ -809,6 +834,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59775 goto error;
59776 }
59777
59778 + if (gr_check_group_change(rgid, egid, -1))
59779 + goto error;
59780 +
59781 if (rgid != (gid_t) -1)
59782 new->gid = rgid;
59783 if (egid != (gid_t) -1)
59784 @@ -855,6 +883,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59785 old = current_cred();
59786 old_fsuid = old->fsuid;
59787
59788 + if (gr_check_user_change(-1, -1, uid))
59789 + goto error;
59790 +
59791 if (uid == old->uid || uid == old->euid ||
59792 uid == old->suid || uid == old->fsuid ||
59793 nsown_capable(CAP_SETUID)) {
59794 @@ -865,6 +896,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59795 }
59796 }
59797
59798 +error:
59799 abort_creds(new);
59800 return old_fsuid;
59801
59802 @@ -891,12 +923,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59803 if (gid == old->gid || gid == old->egid ||
59804 gid == old->sgid || gid == old->fsgid ||
59805 nsown_capable(CAP_SETGID)) {
59806 + if (gr_check_group_change(-1, -1, gid))
59807 + goto error;
59808 +
59809 if (gid != old_fsgid) {
59810 new->fsgid = gid;
59811 goto change_okay;
59812 }
59813 }
59814
59815 +error:
59816 abort_creds(new);
59817 return old_fsgid;
59818
59819 @@ -1643,7 +1679,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59820 error = get_dumpable(me->mm);
59821 break;
59822 case PR_SET_DUMPABLE:
59823 - if (arg2 < 0 || arg2 > 1) {
59824 + if (arg2 > 1) {
59825 error = -EINVAL;
59826 break;
59827 }
59828 diff -urNp linux-2.6.39.4/kernel/sysctl.c linux-2.6.39.4/kernel/sysctl.c
59829 --- linux-2.6.39.4/kernel/sysctl.c 2011-05-19 00:06:34.000000000 -0400
59830 +++ linux-2.6.39.4/kernel/sysctl.c 2011-08-05 19:44:37.000000000 -0400
59831 @@ -84,6 +84,13 @@
59832
59833
59834 #if defined(CONFIG_SYSCTL)
59835 +#include <linux/grsecurity.h>
59836 +#include <linux/grinternal.h>
59837 +
59838 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59839 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59840 + const int op);
59841 +extern int gr_handle_chroot_sysctl(const int op);
59842
59843 /* External variables not in a header file. */
59844 extern int sysctl_overcommit_memory;
59845 @@ -196,6 +203,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59846 }
59847
59848 #endif
59849 +extern struct ctl_table grsecurity_table[];
59850
59851 static struct ctl_table root_table[];
59852 static struct ctl_table_root sysctl_table_root;
59853 @@ -225,6 +233,20 @@ extern struct ctl_table epoll_table[];
59854 int sysctl_legacy_va_layout;
59855 #endif
59856
59857 +#ifdef CONFIG_PAX_SOFTMODE
59858 +static ctl_table pax_table[] = {
59859 + {
59860 + .procname = "softmode",
59861 + .data = &pax_softmode,
59862 + .maxlen = sizeof(unsigned int),
59863 + .mode = 0600,
59864 + .proc_handler = &proc_dointvec,
59865 + },
59866 +
59867 + { }
59868 +};
59869 +#endif
59870 +
59871 /* The default sysctl tables: */
59872
59873 static struct ctl_table root_table[] = {
59874 @@ -271,6 +293,22 @@ static int max_extfrag_threshold = 1000;
59875 #endif
59876
59877 static struct ctl_table kern_table[] = {
59878 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59879 + {
59880 + .procname = "grsecurity",
59881 + .mode = 0500,
59882 + .child = grsecurity_table,
59883 + },
59884 +#endif
59885 +
59886 +#ifdef CONFIG_PAX_SOFTMODE
59887 + {
59888 + .procname = "pax",
59889 + .mode = 0500,
59890 + .child = pax_table,
59891 + },
59892 +#endif
59893 +
59894 {
59895 .procname = "sched_child_runs_first",
59896 .data = &sysctl_sched_child_runs_first,
59897 @@ -545,7 +583,7 @@ static struct ctl_table kern_table[] = {
59898 .data = &modprobe_path,
59899 .maxlen = KMOD_PATH_LEN,
59900 .mode = 0644,
59901 - .proc_handler = proc_dostring,
59902 + .proc_handler = proc_dostring_modpriv,
59903 },
59904 {
59905 .procname = "modules_disabled",
59906 @@ -707,16 +745,20 @@ static struct ctl_table kern_table[] = {
59907 .extra1 = &zero,
59908 .extra2 = &one,
59909 },
59910 +#endif
59911 {
59912 .procname = "kptr_restrict",
59913 .data = &kptr_restrict,
59914 .maxlen = sizeof(int),
59915 .mode = 0644,
59916 .proc_handler = proc_dmesg_restrict,
59917 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59918 + .extra1 = &two,
59919 +#else
59920 .extra1 = &zero,
59921 +#endif
59922 .extra2 = &two,
59923 },
59924 -#endif
59925 {
59926 .procname = "ngroups_max",
59927 .data = &ngroups_max,
59928 @@ -1189,6 +1231,13 @@ static struct ctl_table vm_table[] = {
59929 .proc_handler = proc_dointvec_minmax,
59930 .extra1 = &zero,
59931 },
59932 + {
59933 + .procname = "heap_stack_gap",
59934 + .data = &sysctl_heap_stack_gap,
59935 + .maxlen = sizeof(sysctl_heap_stack_gap),
59936 + .mode = 0644,
59937 + .proc_handler = proc_doulongvec_minmax,
59938 + },
59939 #else
59940 {
59941 .procname = "nr_trim_pages",
59942 @@ -1698,6 +1747,17 @@ static int test_perm(int mode, int op)
59943 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
59944 {
59945 int mode;
59946 + int error;
59947 +
59948 + if (table->parent != NULL && table->parent->procname != NULL &&
59949 + table->procname != NULL &&
59950 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
59951 + return -EACCES;
59952 + if (gr_handle_chroot_sysctl(op))
59953 + return -EACCES;
59954 + error = gr_handle_sysctl(table, op);
59955 + if (error)
59956 + return error;
59957
59958 if (root->permissions)
59959 mode = root->permissions(root, current->nsproxy, table);
59960 @@ -2102,6 +2162,16 @@ int proc_dostring(struct ctl_table *tabl
59961 buffer, lenp, ppos);
59962 }
59963
59964 +int proc_dostring_modpriv(struct ctl_table *table, int write,
59965 + void __user *buffer, size_t *lenp, loff_t *ppos)
59966 +{
59967 + if (write && !capable(CAP_SYS_MODULE))
59968 + return -EPERM;
59969 +
59970 + return _proc_do_string(table->data, table->maxlen, write,
59971 + buffer, lenp, ppos);
59972 +}
59973 +
59974 static size_t proc_skip_spaces(char **buf)
59975 {
59976 size_t ret;
59977 @@ -2207,6 +2277,8 @@ static int proc_put_long(void __user **b
59978 len = strlen(tmp);
59979 if (len > *size)
59980 len = *size;
59981 + if (len > sizeof(tmp))
59982 + len = sizeof(tmp);
59983 if (copy_to_user(*buf, tmp, len))
59984 return -EFAULT;
59985 *size -= len;
59986 @@ -2523,8 +2595,11 @@ static int __do_proc_doulongvec_minmax(v
59987 *i = val;
59988 } else {
59989 val = convdiv * (*i) / convmul;
59990 - if (!first)
59991 + if (!first) {
59992 err = proc_put_char(&buffer, &left, '\t');
59993 + if (err)
59994 + break;
59995 + }
59996 err = proc_put_long(&buffer, &left, val, false);
59997 if (err)
59998 break;
59999 @@ -2919,6 +2994,12 @@ int proc_dostring(struct ctl_table *tabl
60000 return -ENOSYS;
60001 }
60002
60003 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60004 + void __user *buffer, size_t *lenp, loff_t *ppos)
60005 +{
60006 + return -ENOSYS;
60007 +}
60008 +
60009 int proc_dointvec(struct ctl_table *table, int write,
60010 void __user *buffer, size_t *lenp, loff_t *ppos)
60011 {
60012 @@ -2975,6 +3056,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
60013 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
60014 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
60015 EXPORT_SYMBOL(proc_dostring);
60016 +EXPORT_SYMBOL(proc_dostring_modpriv);
60017 EXPORT_SYMBOL(proc_doulongvec_minmax);
60018 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
60019 EXPORT_SYMBOL(register_sysctl_table);
60020 diff -urNp linux-2.6.39.4/kernel/sysctl_check.c linux-2.6.39.4/kernel/sysctl_check.c
60021 --- linux-2.6.39.4/kernel/sysctl_check.c 2011-05-19 00:06:34.000000000 -0400
60022 +++ linux-2.6.39.4/kernel/sysctl_check.c 2011-08-05 19:44:37.000000000 -0400
60023 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
60024 set_fail(&fail, table, "Directory with extra2");
60025 } else {
60026 if ((table->proc_handler == proc_dostring) ||
60027 + (table->proc_handler == proc_dostring_modpriv) ||
60028 (table->proc_handler == proc_dointvec) ||
60029 (table->proc_handler == proc_dointvec_minmax) ||
60030 (table->proc_handler == proc_dointvec_jiffies) ||
60031 diff -urNp linux-2.6.39.4/kernel/taskstats.c linux-2.6.39.4/kernel/taskstats.c
60032 --- linux-2.6.39.4/kernel/taskstats.c 2011-07-09 09:18:51.000000000 -0400
60033 +++ linux-2.6.39.4/kernel/taskstats.c 2011-08-05 19:44:37.000000000 -0400
60034 @@ -27,9 +27,12 @@
60035 #include <linux/cgroup.h>
60036 #include <linux/fs.h>
60037 #include <linux/file.h>
60038 +#include <linux/grsecurity.h>
60039 #include <net/genetlink.h>
60040 #include <asm/atomic.h>
60041
60042 +extern int gr_is_taskstats_denied(int pid);
60043 +
60044 /*
60045 * Maximum length of a cpumask that can be specified in
60046 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
60047 @@ -558,6 +561,9 @@ err:
60048
60049 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
60050 {
60051 + if (gr_is_taskstats_denied(current->pid))
60052 + return -EACCES;
60053 +
60054 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
60055 return cmd_attr_register_cpumask(info);
60056 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
60057 diff -urNp linux-2.6.39.4/kernel/time/tick-broadcast.c linux-2.6.39.4/kernel/time/tick-broadcast.c
60058 --- linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-05-19 00:06:34.000000000 -0400
60059 +++ linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-08-05 19:44:37.000000000 -0400
60060 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
60061 * then clear the broadcast bit.
60062 */
60063 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
60064 - int cpu = smp_processor_id();
60065 + cpu = smp_processor_id();
60066
60067 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
60068 tick_broadcast_clear_oneshot(cpu);
60069 diff -urNp linux-2.6.39.4/kernel/time/timekeeping.c linux-2.6.39.4/kernel/time/timekeeping.c
60070 --- linux-2.6.39.4/kernel/time/timekeeping.c 2011-05-19 00:06:34.000000000 -0400
60071 +++ linux-2.6.39.4/kernel/time/timekeeping.c 2011-08-05 19:44:37.000000000 -0400
60072 @@ -14,6 +14,7 @@
60073 #include <linux/init.h>
60074 #include <linux/mm.h>
60075 #include <linux/sched.h>
60076 +#include <linux/grsecurity.h>
60077 #include <linux/syscore_ops.h>
60078 #include <linux/clocksource.h>
60079 #include <linux/jiffies.h>
60080 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
60081 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
60082 return -EINVAL;
60083
60084 + gr_log_timechange();
60085 +
60086 write_seqlock_irqsave(&xtime_lock, flags);
60087
60088 timekeeping_forward_now();
60089 diff -urNp linux-2.6.39.4/kernel/time/timer_list.c linux-2.6.39.4/kernel/time/timer_list.c
60090 --- linux-2.6.39.4/kernel/time/timer_list.c 2011-05-19 00:06:34.000000000 -0400
60091 +++ linux-2.6.39.4/kernel/time/timer_list.c 2011-08-05 19:44:37.000000000 -0400
60092 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
60093
60094 static void print_name_offset(struct seq_file *m, void *sym)
60095 {
60096 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60097 + SEQ_printf(m, "<%p>", NULL);
60098 +#else
60099 char symname[KSYM_NAME_LEN];
60100
60101 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
60102 SEQ_printf(m, "<%pK>", sym);
60103 else
60104 SEQ_printf(m, "%s", symname);
60105 +#endif
60106 }
60107
60108 static void
60109 @@ -112,7 +116,11 @@ next_one:
60110 static void
60111 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
60112 {
60113 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60114 + SEQ_printf(m, " .base: %p\n", NULL);
60115 +#else
60116 SEQ_printf(m, " .base: %pK\n", base);
60117 +#endif
60118 SEQ_printf(m, " .index: %d\n",
60119 base->index);
60120 SEQ_printf(m, " .resolution: %Lu nsecs\n",
60121 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
60122 {
60123 struct proc_dir_entry *pe;
60124
60125 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60126 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
60127 +#else
60128 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
60129 +#endif
60130 if (!pe)
60131 return -ENOMEM;
60132 return 0;
60133 diff -urNp linux-2.6.39.4/kernel/time/timer_stats.c linux-2.6.39.4/kernel/time/timer_stats.c
60134 --- linux-2.6.39.4/kernel/time/timer_stats.c 2011-05-19 00:06:34.000000000 -0400
60135 +++ linux-2.6.39.4/kernel/time/timer_stats.c 2011-08-05 19:44:37.000000000 -0400
60136 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
60137 static unsigned long nr_entries;
60138 static struct entry entries[MAX_ENTRIES];
60139
60140 -static atomic_t overflow_count;
60141 +static atomic_unchecked_t overflow_count;
60142
60143 /*
60144 * The entries are in a hash-table, for fast lookup:
60145 @@ -140,7 +140,7 @@ static void reset_entries(void)
60146 nr_entries = 0;
60147 memset(entries, 0, sizeof(entries));
60148 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
60149 - atomic_set(&overflow_count, 0);
60150 + atomic_set_unchecked(&overflow_count, 0);
60151 }
60152
60153 static struct entry *alloc_entry(void)
60154 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
60155 if (likely(entry))
60156 entry->count++;
60157 else
60158 - atomic_inc(&overflow_count);
60159 + atomic_inc_unchecked(&overflow_count);
60160
60161 out_unlock:
60162 raw_spin_unlock_irqrestore(lock, flags);
60163 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
60164
60165 static void print_name_offset(struct seq_file *m, unsigned long addr)
60166 {
60167 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60168 + seq_printf(m, "<%p>", NULL);
60169 +#else
60170 char symname[KSYM_NAME_LEN];
60171
60172 if (lookup_symbol_name(addr, symname) < 0)
60173 seq_printf(m, "<%p>", (void *)addr);
60174 else
60175 seq_printf(m, "%s", symname);
60176 +#endif
60177 }
60178
60179 static int tstats_show(struct seq_file *m, void *v)
60180 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
60181
60182 seq_puts(m, "Timer Stats Version: v0.2\n");
60183 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
60184 - if (atomic_read(&overflow_count))
60185 + if (atomic_read_unchecked(&overflow_count))
60186 seq_printf(m, "Overflow: %d entries\n",
60187 - atomic_read(&overflow_count));
60188 + atomic_read_unchecked(&overflow_count));
60189
60190 for (i = 0; i < nr_entries; i++) {
60191 entry = entries + i;
60192 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
60193 {
60194 struct proc_dir_entry *pe;
60195
60196 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60197 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
60198 +#else
60199 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
60200 +#endif
60201 if (!pe)
60202 return -ENOMEM;
60203 return 0;
60204 diff -urNp linux-2.6.39.4/kernel/time.c linux-2.6.39.4/kernel/time.c
60205 --- linux-2.6.39.4/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
60206 +++ linux-2.6.39.4/kernel/time.c 2011-08-05 19:44:37.000000000 -0400
60207 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
60208 return error;
60209
60210 if (tz) {
60211 + /* we log in do_settimeofday called below, so don't log twice
60212 + */
60213 + if (!tv)
60214 + gr_log_timechange();
60215 +
60216 /* SMP safe, global irq locking makes it work. */
60217 sys_tz = *tz;
60218 update_vsyscall_tz();
60219 diff -urNp linux-2.6.39.4/kernel/timer.c linux-2.6.39.4/kernel/timer.c
60220 --- linux-2.6.39.4/kernel/timer.c 2011-05-19 00:06:34.000000000 -0400
60221 +++ linux-2.6.39.4/kernel/timer.c 2011-08-05 19:44:37.000000000 -0400
60222 @@ -1305,7 +1305,7 @@ void update_process_times(int user_tick)
60223 /*
60224 * This function runs timers and the timer-tq in bottom half context.
60225 */
60226 -static void run_timer_softirq(struct softirq_action *h)
60227 +static void run_timer_softirq(void)
60228 {
60229 struct tvec_base *base = __this_cpu_read(tvec_bases);
60230
60231 diff -urNp linux-2.6.39.4/kernel/trace/blktrace.c linux-2.6.39.4/kernel/trace/blktrace.c
60232 --- linux-2.6.39.4/kernel/trace/blktrace.c 2011-05-19 00:06:34.000000000 -0400
60233 +++ linux-2.6.39.4/kernel/trace/blktrace.c 2011-08-05 19:44:37.000000000 -0400
60234 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
60235 struct blk_trace *bt = filp->private_data;
60236 char buf[16];
60237
60238 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60239 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60240
60241 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60242 }
60243 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60244 return 1;
60245
60246 bt = buf->chan->private_data;
60247 - atomic_inc(&bt->dropped);
60248 + atomic_inc_unchecked(&bt->dropped);
60249 return 0;
60250 }
60251
60252 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60253
60254 bt->dir = dir;
60255 bt->dev = dev;
60256 - atomic_set(&bt->dropped, 0);
60257 + atomic_set_unchecked(&bt->dropped, 0);
60258
60259 ret = -EIO;
60260 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60261 diff -urNp linux-2.6.39.4/kernel/trace/ftrace.c linux-2.6.39.4/kernel/trace/ftrace.c
60262 --- linux-2.6.39.4/kernel/trace/ftrace.c 2011-06-03 00:04:14.000000000 -0400
60263 +++ linux-2.6.39.4/kernel/trace/ftrace.c 2011-08-05 20:34:06.000000000 -0400
60264 @@ -1107,13 +1107,18 @@ ftrace_code_disable(struct module *mod,
60265
60266 ip = rec->ip;
60267
60268 + ret = ftrace_arch_code_modify_prepare();
60269 + FTRACE_WARN_ON(ret);
60270 + if (ret)
60271 + return 0;
60272 +
60273 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60274 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60275 if (ret) {
60276 ftrace_bug(ret, ip);
60277 rec->flags |= FTRACE_FL_FAILED;
60278 - return 0;
60279 }
60280 - return 1;
60281 + return ret ? 0 : 1;
60282 }
60283
60284 /*
60285 @@ -2011,7 +2016,7 @@ static void ftrace_free_entry_rcu(struct
60286
60287 int
60288 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60289 - void *data)
60290 + void *data)
60291 {
60292 struct ftrace_func_probe *entry;
60293 struct ftrace_page *pg;
60294 diff -urNp linux-2.6.39.4/kernel/trace/trace.c linux-2.6.39.4/kernel/trace/trace.c
60295 --- linux-2.6.39.4/kernel/trace/trace.c 2011-05-19 00:06:34.000000000 -0400
60296 +++ linux-2.6.39.4/kernel/trace/trace.c 2011-08-05 19:44:37.000000000 -0400
60297 @@ -3330,6 +3330,8 @@ static ssize_t tracing_splice_read_pipe(
60298 size_t rem;
60299 unsigned int i;
60300
60301 + pax_track_stack();
60302 +
60303 if (splice_grow_spd(pipe, &spd))
60304 return -ENOMEM;
60305
60306 @@ -3813,6 +3815,8 @@ tracing_buffers_splice_read(struct file
60307 int entries, size, i;
60308 size_t ret;
60309
60310 + pax_track_stack();
60311 +
60312 if (splice_grow_spd(pipe, &spd))
60313 return -ENOMEM;
60314
60315 @@ -3981,10 +3985,9 @@ static const struct file_operations trac
60316 };
60317 #endif
60318
60319 -static struct dentry *d_tracer;
60320 -
60321 struct dentry *tracing_init_dentry(void)
60322 {
60323 + static struct dentry *d_tracer;
60324 static int once;
60325
60326 if (d_tracer)
60327 @@ -4004,10 +4007,9 @@ struct dentry *tracing_init_dentry(void)
60328 return d_tracer;
60329 }
60330
60331 -static struct dentry *d_percpu;
60332 -
60333 struct dentry *tracing_dentry_percpu(void)
60334 {
60335 + static struct dentry *d_percpu;
60336 static int once;
60337 struct dentry *d_tracer;
60338
60339 diff -urNp linux-2.6.39.4/kernel/trace/trace_events.c linux-2.6.39.4/kernel/trace/trace_events.c
60340 --- linux-2.6.39.4/kernel/trace/trace_events.c 2011-05-19 00:06:34.000000000 -0400
60341 +++ linux-2.6.39.4/kernel/trace/trace_events.c 2011-08-05 20:34:06.000000000 -0400
60342 @@ -1241,10 +1241,6 @@ static LIST_HEAD(ftrace_module_file_list
60343 struct ftrace_module_file_ops {
60344 struct list_head list;
60345 struct module *mod;
60346 - struct file_operations id;
60347 - struct file_operations enable;
60348 - struct file_operations format;
60349 - struct file_operations filter;
60350 };
60351
60352 static struct ftrace_module_file_ops *
60353 @@ -1265,17 +1261,12 @@ trace_create_file_ops(struct module *mod
60354
60355 file_ops->mod = mod;
60356
60357 - file_ops->id = ftrace_event_id_fops;
60358 - file_ops->id.owner = mod;
60359 -
60360 - file_ops->enable = ftrace_enable_fops;
60361 - file_ops->enable.owner = mod;
60362 -
60363 - file_ops->filter = ftrace_event_filter_fops;
60364 - file_ops->filter.owner = mod;
60365 -
60366 - file_ops->format = ftrace_event_format_fops;
60367 - file_ops->format.owner = mod;
60368 + pax_open_kernel();
60369 + *(void **)&mod->trace_id.owner = mod;
60370 + *(void **)&mod->trace_enable.owner = mod;
60371 + *(void **)&mod->trace_filter.owner = mod;
60372 + *(void **)&mod->trace_format.owner = mod;
60373 + pax_close_kernel();
60374
60375 list_add(&file_ops->list, &ftrace_module_file_list);
60376
60377 @@ -1299,8 +1290,8 @@ static void trace_module_add_events(stru
60378
60379 for_each_event(call, start, end) {
60380 __trace_add_event_call(*call, mod,
60381 - &file_ops->id, &file_ops->enable,
60382 - &file_ops->filter, &file_ops->format);
60383 + &mod->trace_id, &mod->trace_enable,
60384 + &mod->trace_filter, &mod->trace_format);
60385 }
60386 }
60387
60388 diff -urNp linux-2.6.39.4/kernel/trace/trace_mmiotrace.c linux-2.6.39.4/kernel/trace/trace_mmiotrace.c
60389 --- linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-05-19 00:06:34.000000000 -0400
60390 +++ linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-08-05 19:44:37.000000000 -0400
60391 @@ -24,7 +24,7 @@ struct header_iter {
60392 static struct trace_array *mmio_trace_array;
60393 static bool overrun_detected;
60394 static unsigned long prev_overruns;
60395 -static atomic_t dropped_count;
60396 +static atomic_unchecked_t dropped_count;
60397
60398 static void mmio_reset_data(struct trace_array *tr)
60399 {
60400 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60401
60402 static unsigned long count_overruns(struct trace_iterator *iter)
60403 {
60404 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
60405 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60406 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60407
60408 if (over > prev_overruns)
60409 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60410 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60411 sizeof(*entry), 0, pc);
60412 if (!event) {
60413 - atomic_inc(&dropped_count);
60414 + atomic_inc_unchecked(&dropped_count);
60415 return;
60416 }
60417 entry = ring_buffer_event_data(event);
60418 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60419 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60420 sizeof(*entry), 0, pc);
60421 if (!event) {
60422 - atomic_inc(&dropped_count);
60423 + atomic_inc_unchecked(&dropped_count);
60424 return;
60425 }
60426 entry = ring_buffer_event_data(event);
60427 diff -urNp linux-2.6.39.4/kernel/trace/trace_output.c linux-2.6.39.4/kernel/trace/trace_output.c
60428 --- linux-2.6.39.4/kernel/trace/trace_output.c 2011-05-19 00:06:34.000000000 -0400
60429 +++ linux-2.6.39.4/kernel/trace/trace_output.c 2011-08-05 19:44:37.000000000 -0400
60430 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60431
60432 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60433 if (!IS_ERR(p)) {
60434 - p = mangle_path(s->buffer + s->len, p, "\n");
60435 + p = mangle_path(s->buffer + s->len, p, "\n\\");
60436 if (p) {
60437 s->len = p - s->buffer;
60438 return 1;
60439 diff -urNp linux-2.6.39.4/kernel/trace/trace_stack.c linux-2.6.39.4/kernel/trace/trace_stack.c
60440 --- linux-2.6.39.4/kernel/trace/trace_stack.c 2011-05-19 00:06:34.000000000 -0400
60441 +++ linux-2.6.39.4/kernel/trace/trace_stack.c 2011-08-05 19:44:37.000000000 -0400
60442 @@ -50,7 +50,7 @@ static inline void check_stack(void)
60443 return;
60444
60445 /* we do not handle interrupt stacks yet */
60446 - if (!object_is_on_stack(&this_size))
60447 + if (!object_starts_on_stack(&this_size))
60448 return;
60449
60450 local_irq_save(flags);
60451 diff -urNp linux-2.6.39.4/kernel/trace/trace_workqueue.c linux-2.6.39.4/kernel/trace/trace_workqueue.c
60452 --- linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-05-19 00:06:34.000000000 -0400
60453 +++ linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-08-05 19:44:37.000000000 -0400
60454 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60455 int cpu;
60456 pid_t pid;
60457 /* Can be inserted from interrupt or user context, need to be atomic */
60458 - atomic_t inserted;
60459 + atomic_unchecked_t inserted;
60460 /*
60461 * Don't need to be atomic, works are serialized in a single workqueue thread
60462 * on a single CPU.
60463 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60464 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60465 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60466 if (node->pid == wq_thread->pid) {
60467 - atomic_inc(&node->inserted);
60468 + atomic_inc_unchecked(&node->inserted);
60469 goto found;
60470 }
60471 }
60472 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60473 tsk = get_pid_task(pid, PIDTYPE_PID);
60474 if (tsk) {
60475 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60476 - atomic_read(&cws->inserted), cws->executed,
60477 + atomic_read_unchecked(&cws->inserted), cws->executed,
60478 tsk->comm);
60479 put_task_struct(tsk);
60480 }
60481 diff -urNp linux-2.6.39.4/lib/bug.c linux-2.6.39.4/lib/bug.c
60482 --- linux-2.6.39.4/lib/bug.c 2011-05-19 00:06:34.000000000 -0400
60483 +++ linux-2.6.39.4/lib/bug.c 2011-08-05 19:44:37.000000000 -0400
60484 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60485 return BUG_TRAP_TYPE_NONE;
60486
60487 bug = find_bug(bugaddr);
60488 + if (!bug)
60489 + return BUG_TRAP_TYPE_NONE;
60490
60491 file = NULL;
60492 line = 0;
60493 diff -urNp linux-2.6.39.4/lib/debugobjects.c linux-2.6.39.4/lib/debugobjects.c
60494 --- linux-2.6.39.4/lib/debugobjects.c 2011-07-09 09:18:51.000000000 -0400
60495 +++ linux-2.6.39.4/lib/debugobjects.c 2011-08-05 19:44:37.000000000 -0400
60496 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60497 if (limit > 4)
60498 return;
60499
60500 - is_on_stack = object_is_on_stack(addr);
60501 + is_on_stack = object_starts_on_stack(addr);
60502 if (is_on_stack == onstack)
60503 return;
60504
60505 diff -urNp linux-2.6.39.4/lib/dma-debug.c linux-2.6.39.4/lib/dma-debug.c
60506 --- linux-2.6.39.4/lib/dma-debug.c 2011-05-19 00:06:34.000000000 -0400
60507 +++ linux-2.6.39.4/lib/dma-debug.c 2011-08-05 19:44:37.000000000 -0400
60508 @@ -862,7 +862,7 @@ out:
60509
60510 static void check_for_stack(struct device *dev, void *addr)
60511 {
60512 - if (object_is_on_stack(addr))
60513 + if (object_starts_on_stack(addr))
60514 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60515 "stack [addr=%p]\n", addr);
60516 }
60517 diff -urNp linux-2.6.39.4/lib/inflate.c linux-2.6.39.4/lib/inflate.c
60518 --- linux-2.6.39.4/lib/inflate.c 2011-05-19 00:06:34.000000000 -0400
60519 +++ linux-2.6.39.4/lib/inflate.c 2011-08-05 19:44:37.000000000 -0400
60520 @@ -269,7 +269,7 @@ static void free(void *where)
60521 malloc_ptr = free_mem_ptr;
60522 }
60523 #else
60524 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60525 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60526 #define free(a) kfree(a)
60527 #endif
60528
60529 diff -urNp linux-2.6.39.4/lib/Kconfig.debug linux-2.6.39.4/lib/Kconfig.debug
60530 --- linux-2.6.39.4/lib/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
60531 +++ linux-2.6.39.4/lib/Kconfig.debug 2011-08-05 19:44:37.000000000 -0400
60532 @@ -1078,6 +1078,7 @@ config LATENCYTOP
60533 depends on DEBUG_KERNEL
60534 depends on STACKTRACE_SUPPORT
60535 depends on PROC_FS
60536 + depends on !GRKERNSEC_HIDESYM
60537 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60538 select KALLSYMS
60539 select KALLSYMS_ALL
60540 diff -urNp linux-2.6.39.4/lib/kref.c linux-2.6.39.4/lib/kref.c
60541 --- linux-2.6.39.4/lib/kref.c 2011-05-19 00:06:34.000000000 -0400
60542 +++ linux-2.6.39.4/lib/kref.c 2011-08-05 19:44:37.000000000 -0400
60543 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60544 */
60545 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60546 {
60547 - WARN_ON(release == NULL);
60548 + BUG_ON(release == NULL);
60549 WARN_ON(release == (void (*)(struct kref *))kfree);
60550
60551 if (atomic_dec_and_test(&kref->refcount)) {
60552 diff -urNp linux-2.6.39.4/lib/radix-tree.c linux-2.6.39.4/lib/radix-tree.c
60553 --- linux-2.6.39.4/lib/radix-tree.c 2011-05-19 00:06:34.000000000 -0400
60554 +++ linux-2.6.39.4/lib/radix-tree.c 2011-08-05 19:44:37.000000000 -0400
60555 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60556 int nr;
60557 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60558 };
60559 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60560 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60561
60562 static inline void *ptr_to_indirect(void *ptr)
60563 {
60564 diff -urNp linux-2.6.39.4/lib/vsprintf.c linux-2.6.39.4/lib/vsprintf.c
60565 --- linux-2.6.39.4/lib/vsprintf.c 2011-05-19 00:06:34.000000000 -0400
60566 +++ linux-2.6.39.4/lib/vsprintf.c 2011-08-05 19:44:37.000000000 -0400
60567 @@ -16,6 +16,9 @@
60568 * - scnprintf and vscnprintf
60569 */
60570
60571 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60572 +#define __INCLUDED_BY_HIDESYM 1
60573 +#endif
60574 #include <stdarg.h>
60575 #include <linux/module.h>
60576 #include <linux/types.h>
60577 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60578 char sym[KSYM_SYMBOL_LEN];
60579 if (ext == 'B')
60580 sprint_backtrace(sym, value);
60581 - else if (ext != 'f' && ext != 's')
60582 + else if (ext != 'f' && ext != 's' && ext != 'a')
60583 sprint_symbol(sym, value);
60584 else
60585 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60586 @@ -797,7 +800,11 @@ char *uuid_string(char *buf, char *end,
60587 return string(buf, end, uuid, spec);
60588 }
60589
60590 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60591 +int kptr_restrict __read_mostly = 2;
60592 +#else
60593 int kptr_restrict __read_mostly;
60594 +#endif
60595
60596 /*
60597 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60598 @@ -811,6 +818,8 @@ int kptr_restrict __read_mostly;
60599 * - 'S' For symbolic direct pointers with offset
60600 * - 's' For symbolic direct pointers without offset
60601 * - 'B' For backtraced symbolic direct pointers with offset
60602 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60603 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60604 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60605 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60606 * - 'M' For a 6-byte MAC address, it prints the address in the
60607 @@ -855,12 +864,12 @@ char *pointer(const char *fmt, char *buf
60608 {
60609 if (!ptr && *fmt != 'K') {
60610 /*
60611 - * Print (null) with the same width as a pointer so it makes
60612 + * Print (nil) with the same width as a pointer so it makes
60613 * tabular output look nice.
60614 */
60615 if (spec.field_width == -1)
60616 spec.field_width = 2 * sizeof(void *);
60617 - return string(buf, end, "(null)", spec);
60618 + return string(buf, end, "(nil)", spec);
60619 }
60620
60621 switch (*fmt) {
60622 @@ -870,6 +879,13 @@ char *pointer(const char *fmt, char *buf
60623 /* Fallthrough */
60624 case 'S':
60625 case 's':
60626 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60627 + break;
60628 +#else
60629 + return symbol_string(buf, end, ptr, spec, *fmt);
60630 +#endif
60631 + case 'A':
60632 + case 'a':
60633 case 'B':
60634 return symbol_string(buf, end, ptr, spec, *fmt);
60635 case 'R':
60636 @@ -1632,11 +1648,11 @@ int bstr_printf(char *buf, size_t size,
60637 typeof(type) value; \
60638 if (sizeof(type) == 8) { \
60639 args = PTR_ALIGN(args, sizeof(u32)); \
60640 - *(u32 *)&value = *(u32 *)args; \
60641 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60642 + *(u32 *)&value = *(const u32 *)args; \
60643 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60644 } else { \
60645 args = PTR_ALIGN(args, sizeof(type)); \
60646 - value = *(typeof(type) *)args; \
60647 + value = *(const typeof(type) *)args; \
60648 } \
60649 args += sizeof(type); \
60650 value; \
60651 @@ -1699,7 +1715,7 @@ int bstr_printf(char *buf, size_t size,
60652 case FORMAT_TYPE_STR: {
60653 const char *str_arg = args;
60654 args += strlen(str_arg) + 1;
60655 - str = string(str, end, (char *)str_arg, spec);
60656 + str = string(str, end, str_arg, spec);
60657 break;
60658 }
60659
60660 diff -urNp linux-2.6.39.4/localversion-grsec linux-2.6.39.4/localversion-grsec
60661 --- linux-2.6.39.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60662 +++ linux-2.6.39.4/localversion-grsec 2011-08-05 19:44:37.000000000 -0400
60663 @@ -0,0 +1 @@
60664 +-grsec
60665 diff -urNp linux-2.6.39.4/Makefile linux-2.6.39.4/Makefile
60666 --- linux-2.6.39.4/Makefile 2011-08-05 21:11:51.000000000 -0400
60667 +++ linux-2.6.39.4/Makefile 2011-08-07 14:17:20.000000000 -0400
60668 @@ -237,8 +237,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60669
60670 HOSTCC = gcc
60671 HOSTCXX = g++
60672 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60673 -HOSTCXXFLAGS = -O2
60674 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60675 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60676 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60677
60678 # Decide whether to build built-in, modular, or both.
60679 # Normally, just do built-in.
60680 @@ -356,10 +357,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60681 KBUILD_CPPFLAGS := -D__KERNEL__
60682
60683 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60684 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60685 -fno-strict-aliasing -fno-common \
60686 -Werror-implicit-function-declaration \
60687 -Wno-format-security \
60688 -fno-delete-null-pointer-checks
60689 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60690 KBUILD_AFLAGS_KERNEL :=
60691 KBUILD_CFLAGS_KERNEL :=
60692 KBUILD_AFLAGS := -D__ASSEMBLY__
60693 @@ -397,8 +400,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
60694 # Rules shared between *config targets and build targets
60695
60696 # Basic helpers built in scripts/
60697 -PHONY += scripts_basic
60698 -scripts_basic:
60699 +PHONY += scripts_basic gcc-plugins
60700 +scripts_basic: gcc-plugins
60701 $(Q)$(MAKE) $(build)=scripts/basic
60702 $(Q)rm -f .tmp_quiet_recordmcount
60703
60704 @@ -548,6 +551,25 @@ else
60705 KBUILD_CFLAGS += -O2
60706 endif
60707
60708 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60709 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60710 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60711 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60712 +endif
60713 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60714 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60715 +gcc-plugins:
60716 + $(Q)$(MAKE) $(build)=tools/gcc
60717 +else
60718 +gcc-plugins:
60719 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60720 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60721 +else
60722 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60723 +endif
60724 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60725 +endif
60726 +
60727 include $(srctree)/arch/$(SRCARCH)/Makefile
60728
60729 ifneq ($(CONFIG_FRAME_WARN),0)
60730 @@ -685,7 +707,7 @@ export mod_strip_cmd
60731
60732
60733 ifeq ($(KBUILD_EXTMOD),)
60734 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60735 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60736
60737 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60738 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60739 @@ -947,7 +969,7 @@ ifneq ($(KBUILD_SRC),)
60740 endif
60741
60742 # prepare2 creates a makefile if using a separate output directory
60743 -prepare2: prepare3 outputmakefile
60744 +prepare2: prepare3 outputmakefile gcc-plugins
60745
60746 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60747 include/config/auto.conf
60748 @@ -1375,7 +1397,7 @@ clean: $(clean-dirs)
60749 $(call cmd,rmdirs)
60750 $(call cmd,rmfiles)
60751 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60752 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60753 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60754 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60755 -o -name '*.symtypes' -o -name 'modules.order' \
60756 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60757 diff -urNp linux-2.6.39.4/mm/filemap.c linux-2.6.39.4/mm/filemap.c
60758 --- linux-2.6.39.4/mm/filemap.c 2011-05-19 00:06:34.000000000 -0400
60759 +++ linux-2.6.39.4/mm/filemap.c 2011-08-05 19:44:37.000000000 -0400
60760 @@ -1724,7 +1724,7 @@ int generic_file_mmap(struct file * file
60761 struct address_space *mapping = file->f_mapping;
60762
60763 if (!mapping->a_ops->readpage)
60764 - return -ENOEXEC;
60765 + return -ENODEV;
60766 file_accessed(file);
60767 vma->vm_ops = &generic_file_vm_ops;
60768 vma->vm_flags |= VM_CAN_NONLINEAR;
60769 @@ -2120,6 +2120,7 @@ inline int generic_write_checks(struct f
60770 *pos = i_size_read(inode);
60771
60772 if (limit != RLIM_INFINITY) {
60773 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60774 if (*pos >= limit) {
60775 send_sig(SIGXFSZ, current, 0);
60776 return -EFBIG;
60777 diff -urNp linux-2.6.39.4/mm/fremap.c linux-2.6.39.4/mm/fremap.c
60778 --- linux-2.6.39.4/mm/fremap.c 2011-05-19 00:06:34.000000000 -0400
60779 +++ linux-2.6.39.4/mm/fremap.c 2011-08-05 19:44:37.000000000 -0400
60780 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60781 retry:
60782 vma = find_vma(mm, start);
60783
60784 +#ifdef CONFIG_PAX_SEGMEXEC
60785 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60786 + goto out;
60787 +#endif
60788 +
60789 /*
60790 * Make sure the vma is shared, that it supports prefaulting,
60791 * and that the remapped range is valid and fully within
60792 @@ -224,7 +229,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60793 /*
60794 * drop PG_Mlocked flag for over-mapped range
60795 */
60796 - unsigned int saved_flags = vma->vm_flags;
60797 + unsigned long saved_flags = vma->vm_flags;
60798 munlock_vma_pages_range(vma, start, start + size);
60799 vma->vm_flags = saved_flags;
60800 }
60801 diff -urNp linux-2.6.39.4/mm/highmem.c linux-2.6.39.4/mm/highmem.c
60802 --- linux-2.6.39.4/mm/highmem.c 2011-05-19 00:06:34.000000000 -0400
60803 +++ linux-2.6.39.4/mm/highmem.c 2011-08-05 19:44:37.000000000 -0400
60804 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60805 * So no dangers, even with speculative execution.
60806 */
60807 page = pte_page(pkmap_page_table[i]);
60808 + pax_open_kernel();
60809 pte_clear(&init_mm, (unsigned long)page_address(page),
60810 &pkmap_page_table[i]);
60811 -
60812 + pax_close_kernel();
60813 set_page_address(page, NULL);
60814 need_flush = 1;
60815 }
60816 @@ -186,9 +187,11 @@ start:
60817 }
60818 }
60819 vaddr = PKMAP_ADDR(last_pkmap_nr);
60820 +
60821 + pax_open_kernel();
60822 set_pte_at(&init_mm, vaddr,
60823 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60824 -
60825 + pax_close_kernel();
60826 pkmap_count[last_pkmap_nr] = 1;
60827 set_page_address(page, (void *)vaddr);
60828
60829 diff -urNp linux-2.6.39.4/mm/huge_memory.c linux-2.6.39.4/mm/huge_memory.c
60830 --- linux-2.6.39.4/mm/huge_memory.c 2011-05-19 00:06:34.000000000 -0400
60831 +++ linux-2.6.39.4/mm/huge_memory.c 2011-08-05 19:44:37.000000000 -0400
60832 @@ -702,7 +702,7 @@ out:
60833 * run pte_offset_map on the pmd, if an huge pmd could
60834 * materialize from under us from a different thread.
60835 */
60836 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60837 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60838 return VM_FAULT_OOM;
60839 /* if an huge pmd materialized from under us just retry later */
60840 if (unlikely(pmd_trans_huge(*pmd)))
60841 diff -urNp linux-2.6.39.4/mm/hugetlb.c linux-2.6.39.4/mm/hugetlb.c
60842 --- linux-2.6.39.4/mm/hugetlb.c 2011-07-09 09:18:51.000000000 -0400
60843 +++ linux-2.6.39.4/mm/hugetlb.c 2011-08-05 19:44:37.000000000 -0400
60844 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60845 return 1;
60846 }
60847
60848 +#ifdef CONFIG_PAX_SEGMEXEC
60849 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60850 +{
60851 + struct mm_struct *mm = vma->vm_mm;
60852 + struct vm_area_struct *vma_m;
60853 + unsigned long address_m;
60854 + pte_t *ptep_m;
60855 +
60856 + vma_m = pax_find_mirror_vma(vma);
60857 + if (!vma_m)
60858 + return;
60859 +
60860 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60861 + address_m = address + SEGMEXEC_TASK_SIZE;
60862 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60863 + get_page(page_m);
60864 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60865 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60866 +}
60867 +#endif
60868 +
60869 /*
60870 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60871 */
60872 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60873 make_huge_pte(vma, new_page, 1));
60874 page_remove_rmap(old_page);
60875 hugepage_add_new_anon_rmap(new_page, vma, address);
60876 +
60877 +#ifdef CONFIG_PAX_SEGMEXEC
60878 + pax_mirror_huge_pte(vma, address, new_page);
60879 +#endif
60880 +
60881 /* Make the old page be freed below */
60882 new_page = old_page;
60883 mmu_notifier_invalidate_range_end(mm,
60884 @@ -2591,6 +2617,10 @@ retry:
60885 && (vma->vm_flags & VM_SHARED)));
60886 set_huge_pte_at(mm, address, ptep, new_pte);
60887
60888 +#ifdef CONFIG_PAX_SEGMEXEC
60889 + pax_mirror_huge_pte(vma, address, page);
60890 +#endif
60891 +
60892 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60893 /* Optimization, do the COW without a second fault */
60894 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60895 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60896 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60897 struct hstate *h = hstate_vma(vma);
60898
60899 +#ifdef CONFIG_PAX_SEGMEXEC
60900 + struct vm_area_struct *vma_m;
60901 +#endif
60902 +
60903 ptep = huge_pte_offset(mm, address);
60904 if (ptep) {
60905 entry = huge_ptep_get(ptep);
60906 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60907 VM_FAULT_SET_HINDEX(h - hstates);
60908 }
60909
60910 +#ifdef CONFIG_PAX_SEGMEXEC
60911 + vma_m = pax_find_mirror_vma(vma);
60912 + if (vma_m) {
60913 + unsigned long address_m;
60914 +
60915 + if (vma->vm_start > vma_m->vm_start) {
60916 + address_m = address;
60917 + address -= SEGMEXEC_TASK_SIZE;
60918 + vma = vma_m;
60919 + h = hstate_vma(vma);
60920 + } else
60921 + address_m = address + SEGMEXEC_TASK_SIZE;
60922 +
60923 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
60924 + return VM_FAULT_OOM;
60925 + address_m &= HPAGE_MASK;
60926 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
60927 + }
60928 +#endif
60929 +
60930 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
60931 if (!ptep)
60932 return VM_FAULT_OOM;
60933 diff -urNp linux-2.6.39.4/mm/internal.h linux-2.6.39.4/mm/internal.h
60934 --- linux-2.6.39.4/mm/internal.h 2011-05-19 00:06:34.000000000 -0400
60935 +++ linux-2.6.39.4/mm/internal.h 2011-08-05 19:44:37.000000000 -0400
60936 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
60937 * in mm/page_alloc.c
60938 */
60939 extern void __free_pages_bootmem(struct page *page, unsigned int order);
60940 +extern void free_compound_page(struct page *page);
60941 extern void prep_compound_page(struct page *page, unsigned long order);
60942 #ifdef CONFIG_MEMORY_FAILURE
60943 extern bool is_free_buddy_page(struct page *page);
60944 diff -urNp linux-2.6.39.4/mm/Kconfig linux-2.6.39.4/mm/Kconfig
60945 --- linux-2.6.39.4/mm/Kconfig 2011-05-19 00:06:34.000000000 -0400
60946 +++ linux-2.6.39.4/mm/Kconfig 2011-08-05 19:44:37.000000000 -0400
60947 @@ -240,7 +240,7 @@ config KSM
60948 config DEFAULT_MMAP_MIN_ADDR
60949 int "Low address space to protect from user allocation"
60950 depends on MMU
60951 - default 4096
60952 + default 65536
60953 help
60954 This is the portion of low virtual memory which should be protected
60955 from userspace allocation. Keeping a user from writing to low pages
60956 diff -urNp linux-2.6.39.4/mm/kmemleak.c linux-2.6.39.4/mm/kmemleak.c
60957 --- linux-2.6.39.4/mm/kmemleak.c 2011-06-03 00:04:14.000000000 -0400
60958 +++ linux-2.6.39.4/mm/kmemleak.c 2011-08-05 19:44:37.000000000 -0400
60959 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
60960
60961 for (i = 0; i < object->trace_len; i++) {
60962 void *ptr = (void *)object->trace[i];
60963 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
60964 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
60965 }
60966 }
60967
60968 diff -urNp linux-2.6.39.4/mm/maccess.c linux-2.6.39.4/mm/maccess.c
60969 --- linux-2.6.39.4/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
60970 +++ linux-2.6.39.4/mm/maccess.c 2011-08-05 19:44:37.000000000 -0400
60971 @@ -15,10 +15,10 @@
60972 * happens, handle that and return -EFAULT.
60973 */
60974
60975 -long __weak probe_kernel_read(void *dst, void *src, size_t size)
60976 +long __weak probe_kernel_read(void *dst, const void *src, size_t size)
60977 __attribute__((alias("__probe_kernel_read")));
60978
60979 -long __probe_kernel_read(void *dst, void *src, size_t size)
60980 +long __probe_kernel_read(void *dst, const void *src, size_t size)
60981 {
60982 long ret;
60983 mm_segment_t old_fs = get_fs();
60984 @@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
60985 * Safely write to address @dst from the buffer at @src. If a kernel fault
60986 * happens, handle that and return -EFAULT.
60987 */
60988 -long __weak probe_kernel_write(void *dst, void *src, size_t size)
60989 +long __weak probe_kernel_write(void *dst, const void *src, size_t size)
60990 __attribute__((alias("__probe_kernel_write")));
60991
60992 -long __probe_kernel_write(void *dst, void *src, size_t size)
60993 +long __probe_kernel_write(void *dst, const void *src, size_t size)
60994 {
60995 long ret;
60996 mm_segment_t old_fs = get_fs();
60997 diff -urNp linux-2.6.39.4/mm/madvise.c linux-2.6.39.4/mm/madvise.c
60998 --- linux-2.6.39.4/mm/madvise.c 2011-05-19 00:06:34.000000000 -0400
60999 +++ linux-2.6.39.4/mm/madvise.c 2011-08-05 19:44:37.000000000 -0400
61000 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
61001 pgoff_t pgoff;
61002 unsigned long new_flags = vma->vm_flags;
61003
61004 +#ifdef CONFIG_PAX_SEGMEXEC
61005 + struct vm_area_struct *vma_m;
61006 +#endif
61007 +
61008 switch (behavior) {
61009 case MADV_NORMAL:
61010 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
61011 @@ -110,6 +114,13 @@ success:
61012 /*
61013 * vm_flags is protected by the mmap_sem held in write mode.
61014 */
61015 +
61016 +#ifdef CONFIG_PAX_SEGMEXEC
61017 + vma_m = pax_find_mirror_vma(vma);
61018 + if (vma_m)
61019 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
61020 +#endif
61021 +
61022 vma->vm_flags = new_flags;
61023
61024 out:
61025 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
61026 struct vm_area_struct ** prev,
61027 unsigned long start, unsigned long end)
61028 {
61029 +
61030 +#ifdef CONFIG_PAX_SEGMEXEC
61031 + struct vm_area_struct *vma_m;
61032 +#endif
61033 +
61034 *prev = vma;
61035 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
61036 return -EINVAL;
61037 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
61038 zap_page_range(vma, start, end - start, &details);
61039 } else
61040 zap_page_range(vma, start, end - start, NULL);
61041 +
61042 +#ifdef CONFIG_PAX_SEGMEXEC
61043 + vma_m = pax_find_mirror_vma(vma);
61044 + if (vma_m) {
61045 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
61046 + struct zap_details details = {
61047 + .nonlinear_vma = vma_m,
61048 + .last_index = ULONG_MAX,
61049 + };
61050 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
61051 + } else
61052 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
61053 + }
61054 +#endif
61055 +
61056 return 0;
61057 }
61058
61059 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
61060 if (end < start)
61061 goto out;
61062
61063 +#ifdef CONFIG_PAX_SEGMEXEC
61064 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
61065 + if (end > SEGMEXEC_TASK_SIZE)
61066 + goto out;
61067 + } else
61068 +#endif
61069 +
61070 + if (end > TASK_SIZE)
61071 + goto out;
61072 +
61073 error = 0;
61074 if (end == start)
61075 goto out;
61076 diff -urNp linux-2.6.39.4/mm/memory.c linux-2.6.39.4/mm/memory.c
61077 --- linux-2.6.39.4/mm/memory.c 2011-05-19 00:06:34.000000000 -0400
61078 +++ linux-2.6.39.4/mm/memory.c 2011-08-05 19:44:37.000000000 -0400
61079 @@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
61080 return;
61081
61082 pmd = pmd_offset(pud, start);
61083 +
61084 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
61085 pud_clear(pud);
61086 pmd_free_tlb(tlb, pmd, start);
61087 +#endif
61088 +
61089 }
61090
61091 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
61092 @@ -291,9 +295,12 @@ static inline void free_pud_range(struct
61093 if (end - 1 > ceiling - 1)
61094 return;
61095
61096 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
61097 pud = pud_offset(pgd, start);
61098 pgd_clear(pgd);
61099 pud_free_tlb(tlb, pud, start);
61100 +#endif
61101 +
61102 }
61103
61104 /*
61105 @@ -1410,12 +1417,6 @@ no_page_table:
61106 return page;
61107 }
61108
61109 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
61110 -{
61111 - return stack_guard_page_start(vma, addr) ||
61112 - stack_guard_page_end(vma, addr+PAGE_SIZE);
61113 -}
61114 -
61115 /**
61116 * __get_user_pages() - pin user pages in memory
61117 * @tsk: task_struct of target task
61118 @@ -1488,10 +1489,10 @@ int __get_user_pages(struct task_struct
61119 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
61120 i = 0;
61121
61122 - do {
61123 + while (nr_pages) {
61124 struct vm_area_struct *vma;
61125
61126 - vma = find_extend_vma(mm, start);
61127 + vma = find_vma(mm, start);
61128 if (!vma && in_gate_area(mm, start)) {
61129 unsigned long pg = start & PAGE_MASK;
61130 pgd_t *pgd;
61131 @@ -1539,7 +1540,7 @@ int __get_user_pages(struct task_struct
61132 goto next_page;
61133 }
61134
61135 - if (!vma ||
61136 + if (!vma || start < vma->vm_start ||
61137 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
61138 !(vm_flags & vma->vm_flags))
61139 return i ? : -EFAULT;
61140 @@ -1566,11 +1567,6 @@ int __get_user_pages(struct task_struct
61141 int ret;
61142 unsigned int fault_flags = 0;
61143
61144 - /* For mlock, just skip the stack guard page. */
61145 - if (foll_flags & FOLL_MLOCK) {
61146 - if (stack_guard_page(vma, start))
61147 - goto next_page;
61148 - }
61149 if (foll_flags & FOLL_WRITE)
61150 fault_flags |= FAULT_FLAG_WRITE;
61151 if (nonblocking)
61152 @@ -1644,7 +1640,7 @@ next_page:
61153 start += PAGE_SIZE;
61154 nr_pages--;
61155 } while (nr_pages && start < vma->vm_end);
61156 - } while (nr_pages);
61157 + }
61158 return i;
61159 }
61160 EXPORT_SYMBOL(__get_user_pages);
61161 @@ -1795,6 +1791,10 @@ static int insert_page(struct vm_area_st
61162 page_add_file_rmap(page);
61163 set_pte_at(mm, addr, pte, mk_pte(page, prot));
61164
61165 +#ifdef CONFIG_PAX_SEGMEXEC
61166 + pax_mirror_file_pte(vma, addr, page, ptl);
61167 +#endif
61168 +
61169 retval = 0;
61170 pte_unmap_unlock(pte, ptl);
61171 return retval;
61172 @@ -1829,10 +1829,22 @@ out:
61173 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
61174 struct page *page)
61175 {
61176 +
61177 +#ifdef CONFIG_PAX_SEGMEXEC
61178 + struct vm_area_struct *vma_m;
61179 +#endif
61180 +
61181 if (addr < vma->vm_start || addr >= vma->vm_end)
61182 return -EFAULT;
61183 if (!page_count(page))
61184 return -EINVAL;
61185 +
61186 +#ifdef CONFIG_PAX_SEGMEXEC
61187 + vma_m = pax_find_mirror_vma(vma);
61188 + if (vma_m)
61189 + vma_m->vm_flags |= VM_INSERTPAGE;
61190 +#endif
61191 +
61192 vma->vm_flags |= VM_INSERTPAGE;
61193 return insert_page(vma, addr, page, vma->vm_page_prot);
61194 }
61195 @@ -1918,6 +1930,7 @@ int vm_insert_mixed(struct vm_area_struc
61196 unsigned long pfn)
61197 {
61198 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
61199 + BUG_ON(vma->vm_mirror);
61200
61201 if (addr < vma->vm_start || addr >= vma->vm_end)
61202 return -EFAULT;
61203 @@ -2233,6 +2246,186 @@ static inline void cow_user_page(struct
61204 copy_user_highpage(dst, src, va, vma);
61205 }
61206
61207 +#ifdef CONFIG_PAX_SEGMEXEC
61208 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
61209 +{
61210 + struct mm_struct *mm = vma->vm_mm;
61211 + spinlock_t *ptl;
61212 + pte_t *pte, entry;
61213 +
61214 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
61215 + entry = *pte;
61216 + if (!pte_present(entry)) {
61217 + if (!pte_none(entry)) {
61218 + BUG_ON(pte_file(entry));
61219 + free_swap_and_cache(pte_to_swp_entry(entry));
61220 + pte_clear_not_present_full(mm, address, pte, 0);
61221 + }
61222 + } else {
61223 + struct page *page;
61224 +
61225 + flush_cache_page(vma, address, pte_pfn(entry));
61226 + entry = ptep_clear_flush(vma, address, pte);
61227 + BUG_ON(pte_dirty(entry));
61228 + page = vm_normal_page(vma, address, entry);
61229 + if (page) {
61230 + update_hiwater_rss(mm);
61231 + if (PageAnon(page))
61232 + dec_mm_counter_fast(mm, MM_ANONPAGES);
61233 + else
61234 + dec_mm_counter_fast(mm, MM_FILEPAGES);
61235 + page_remove_rmap(page);
61236 + page_cache_release(page);
61237 + }
61238 + }
61239 + pte_unmap_unlock(pte, ptl);
61240 +}
61241 +
61242 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
61243 + *
61244 + * the ptl of the lower mapped page is held on entry and is not released on exit
61245 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61246 + */
61247 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61248 +{
61249 + struct mm_struct *mm = vma->vm_mm;
61250 + unsigned long address_m;
61251 + spinlock_t *ptl_m;
61252 + struct vm_area_struct *vma_m;
61253 + pmd_t *pmd_m;
61254 + pte_t *pte_m, entry_m;
61255 +
61256 + BUG_ON(!page_m || !PageAnon(page_m));
61257 +
61258 + vma_m = pax_find_mirror_vma(vma);
61259 + if (!vma_m)
61260 + return;
61261 +
61262 + BUG_ON(!PageLocked(page_m));
61263 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61264 + address_m = address + SEGMEXEC_TASK_SIZE;
61265 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61266 + pte_m = pte_offset_map(pmd_m, address_m);
61267 + ptl_m = pte_lockptr(mm, pmd_m);
61268 + if (ptl != ptl_m) {
61269 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61270 + if (!pte_none(*pte_m))
61271 + goto out;
61272 + }
61273 +
61274 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61275 + page_cache_get(page_m);
61276 + page_add_anon_rmap(page_m, vma_m, address_m);
61277 + inc_mm_counter_fast(mm, MM_ANONPAGES);
61278 + set_pte_at(mm, address_m, pte_m, entry_m);
61279 + update_mmu_cache(vma_m, address_m, entry_m);
61280 +out:
61281 + if (ptl != ptl_m)
61282 + spin_unlock(ptl_m);
61283 + pte_unmap(pte_m);
61284 + unlock_page(page_m);
61285 +}
61286 +
61287 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61288 +{
61289 + struct mm_struct *mm = vma->vm_mm;
61290 + unsigned long address_m;
61291 + spinlock_t *ptl_m;
61292 + struct vm_area_struct *vma_m;
61293 + pmd_t *pmd_m;
61294 + pte_t *pte_m, entry_m;
61295 +
61296 + BUG_ON(!page_m || PageAnon(page_m));
61297 +
61298 + vma_m = pax_find_mirror_vma(vma);
61299 + if (!vma_m)
61300 + return;
61301 +
61302 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61303 + address_m = address + SEGMEXEC_TASK_SIZE;
61304 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61305 + pte_m = pte_offset_map(pmd_m, address_m);
61306 + ptl_m = pte_lockptr(mm, pmd_m);
61307 + if (ptl != ptl_m) {
61308 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61309 + if (!pte_none(*pte_m))
61310 + goto out;
61311 + }
61312 +
61313 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61314 + page_cache_get(page_m);
61315 + page_add_file_rmap(page_m);
61316 + inc_mm_counter_fast(mm, MM_FILEPAGES);
61317 + set_pte_at(mm, address_m, pte_m, entry_m);
61318 + update_mmu_cache(vma_m, address_m, entry_m);
61319 +out:
61320 + if (ptl != ptl_m)
61321 + spin_unlock(ptl_m);
61322 + pte_unmap(pte_m);
61323 +}
61324 +
61325 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61326 +{
61327 + struct mm_struct *mm = vma->vm_mm;
61328 + unsigned long address_m;
61329 + spinlock_t *ptl_m;
61330 + struct vm_area_struct *vma_m;
61331 + pmd_t *pmd_m;
61332 + pte_t *pte_m, entry_m;
61333 +
61334 + vma_m = pax_find_mirror_vma(vma);
61335 + if (!vma_m)
61336 + return;
61337 +
61338 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61339 + address_m = address + SEGMEXEC_TASK_SIZE;
61340 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61341 + pte_m = pte_offset_map(pmd_m, address_m);
61342 + ptl_m = pte_lockptr(mm, pmd_m);
61343 + if (ptl != ptl_m) {
61344 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61345 + if (!pte_none(*pte_m))
61346 + goto out;
61347 + }
61348 +
61349 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61350 + set_pte_at(mm, address_m, pte_m, entry_m);
61351 +out:
61352 + if (ptl != ptl_m)
61353 + spin_unlock(ptl_m);
61354 + pte_unmap(pte_m);
61355 +}
61356 +
61357 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61358 +{
61359 + struct page *page_m;
61360 + pte_t entry;
61361 +
61362 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61363 + goto out;
61364 +
61365 + entry = *pte;
61366 + page_m = vm_normal_page(vma, address, entry);
61367 + if (!page_m)
61368 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61369 + else if (PageAnon(page_m)) {
61370 + if (pax_find_mirror_vma(vma)) {
61371 + pte_unmap_unlock(pte, ptl);
61372 + lock_page(page_m);
61373 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61374 + if (pte_same(entry, *pte))
61375 + pax_mirror_anon_pte(vma, address, page_m, ptl);
61376 + else
61377 + unlock_page(page_m);
61378 + }
61379 + } else
61380 + pax_mirror_file_pte(vma, address, page_m, ptl);
61381 +
61382 +out:
61383 + pte_unmap_unlock(pte, ptl);
61384 +}
61385 +#endif
61386 +
61387 /*
61388 * This routine handles present pages, when users try to write
61389 * to a shared page. It is done by copying the page to a new address
61390 @@ -2444,6 +2637,12 @@ gotten:
61391 */
61392 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61393 if (likely(pte_same(*page_table, orig_pte))) {
61394 +
61395 +#ifdef CONFIG_PAX_SEGMEXEC
61396 + if (pax_find_mirror_vma(vma))
61397 + BUG_ON(!trylock_page(new_page));
61398 +#endif
61399 +
61400 if (old_page) {
61401 if (!PageAnon(old_page)) {
61402 dec_mm_counter_fast(mm, MM_FILEPAGES);
61403 @@ -2495,6 +2694,10 @@ gotten:
61404 page_remove_rmap(old_page);
61405 }
61406
61407 +#ifdef CONFIG_PAX_SEGMEXEC
61408 + pax_mirror_anon_pte(vma, address, new_page, ptl);
61409 +#endif
61410 +
61411 /* Free the old page.. */
61412 new_page = old_page;
61413 ret |= VM_FAULT_WRITE;
61414 @@ -2905,6 +3108,11 @@ static int do_swap_page(struct mm_struct
61415 swap_free(entry);
61416 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61417 try_to_free_swap(page);
61418 +
61419 +#ifdef CONFIG_PAX_SEGMEXEC
61420 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61421 +#endif
61422 +
61423 unlock_page(page);
61424 if (swapcache) {
61425 /*
61426 @@ -2928,6 +3136,11 @@ static int do_swap_page(struct mm_struct
61427
61428 /* No need to invalidate - it was non-present before */
61429 update_mmu_cache(vma, address, page_table);
61430 +
61431 +#ifdef CONFIG_PAX_SEGMEXEC
61432 + pax_mirror_anon_pte(vma, address, page, ptl);
61433 +#endif
61434 +
61435 unlock:
61436 pte_unmap_unlock(page_table, ptl);
61437 out:
61438 @@ -2947,40 +3160,6 @@ out_release:
61439 }
61440
61441 /*
61442 - * This is like a special single-page "expand_{down|up}wards()",
61443 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
61444 - * doesn't hit another vma.
61445 - */
61446 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61447 -{
61448 - address &= PAGE_MASK;
61449 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61450 - struct vm_area_struct *prev = vma->vm_prev;
61451 -
61452 - /*
61453 - * Is there a mapping abutting this one below?
61454 - *
61455 - * That's only ok if it's the same stack mapping
61456 - * that has gotten split..
61457 - */
61458 - if (prev && prev->vm_end == address)
61459 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61460 -
61461 - expand_stack(vma, address - PAGE_SIZE);
61462 - }
61463 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61464 - struct vm_area_struct *next = vma->vm_next;
61465 -
61466 - /* As VM_GROWSDOWN but s/below/above/ */
61467 - if (next && next->vm_start == address + PAGE_SIZE)
61468 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61469 -
61470 - expand_upwards(vma, address + PAGE_SIZE);
61471 - }
61472 - return 0;
61473 -}
61474 -
61475 -/*
61476 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61477 * but allow concurrent faults), and pte mapped but not yet locked.
61478 * We return with mmap_sem still held, but pte unmapped and unlocked.
61479 @@ -2989,27 +3168,23 @@ static int do_anonymous_page(struct mm_s
61480 unsigned long address, pte_t *page_table, pmd_t *pmd,
61481 unsigned int flags)
61482 {
61483 - struct page *page;
61484 + struct page *page = NULL;
61485 spinlock_t *ptl;
61486 pte_t entry;
61487
61488 - pte_unmap(page_table);
61489 -
61490 - /* Check if we need to add a guard page to the stack */
61491 - if (check_stack_guard_page(vma, address) < 0)
61492 - return VM_FAULT_SIGBUS;
61493 -
61494 - /* Use the zero-page for reads */
61495 if (!(flags & FAULT_FLAG_WRITE)) {
61496 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61497 vma->vm_page_prot));
61498 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61499 + ptl = pte_lockptr(mm, pmd);
61500 + spin_lock(ptl);
61501 if (!pte_none(*page_table))
61502 goto unlock;
61503 goto setpte;
61504 }
61505
61506 /* Allocate our own private page. */
61507 + pte_unmap(page_table);
61508 +
61509 if (unlikely(anon_vma_prepare(vma)))
61510 goto oom;
61511 page = alloc_zeroed_user_highpage_movable(vma, address);
61512 @@ -3028,6 +3203,11 @@ static int do_anonymous_page(struct mm_s
61513 if (!pte_none(*page_table))
61514 goto release;
61515
61516 +#ifdef CONFIG_PAX_SEGMEXEC
61517 + if (pax_find_mirror_vma(vma))
61518 + BUG_ON(!trylock_page(page));
61519 +#endif
61520 +
61521 inc_mm_counter_fast(mm, MM_ANONPAGES);
61522 page_add_new_anon_rmap(page, vma, address);
61523 setpte:
61524 @@ -3035,6 +3215,12 @@ setpte:
61525
61526 /* No need to invalidate - it was non-present before */
61527 update_mmu_cache(vma, address, page_table);
61528 +
61529 +#ifdef CONFIG_PAX_SEGMEXEC
61530 + if (page)
61531 + pax_mirror_anon_pte(vma, address, page, ptl);
61532 +#endif
61533 +
61534 unlock:
61535 pte_unmap_unlock(page_table, ptl);
61536 return 0;
61537 @@ -3172,6 +3358,12 @@ static int __do_fault(struct mm_struct *
61538 */
61539 /* Only go through if we didn't race with anybody else... */
61540 if (likely(pte_same(*page_table, orig_pte))) {
61541 +
61542 +#ifdef CONFIG_PAX_SEGMEXEC
61543 + if (anon && pax_find_mirror_vma(vma))
61544 + BUG_ON(!trylock_page(page));
61545 +#endif
61546 +
61547 flush_icache_page(vma, page);
61548 entry = mk_pte(page, vma->vm_page_prot);
61549 if (flags & FAULT_FLAG_WRITE)
61550 @@ -3191,6 +3383,14 @@ static int __do_fault(struct mm_struct *
61551
61552 /* no need to invalidate: a not-present page won't be cached */
61553 update_mmu_cache(vma, address, page_table);
61554 +
61555 +#ifdef CONFIG_PAX_SEGMEXEC
61556 + if (anon)
61557 + pax_mirror_anon_pte(vma, address, page, ptl);
61558 + else
61559 + pax_mirror_file_pte(vma, address, page, ptl);
61560 +#endif
61561 +
61562 } else {
61563 if (charged)
61564 mem_cgroup_uncharge_page(page);
61565 @@ -3338,6 +3538,12 @@ int handle_pte_fault(struct mm_struct *m
61566 if (flags & FAULT_FLAG_WRITE)
61567 flush_tlb_fix_spurious_fault(vma, address);
61568 }
61569 +
61570 +#ifdef CONFIG_PAX_SEGMEXEC
61571 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61572 + return 0;
61573 +#endif
61574 +
61575 unlock:
61576 pte_unmap_unlock(pte, ptl);
61577 return 0;
61578 @@ -3354,6 +3560,10 @@ int handle_mm_fault(struct mm_struct *mm
61579 pmd_t *pmd;
61580 pte_t *pte;
61581
61582 +#ifdef CONFIG_PAX_SEGMEXEC
61583 + struct vm_area_struct *vma_m;
61584 +#endif
61585 +
61586 __set_current_state(TASK_RUNNING);
61587
61588 count_vm_event(PGFAULT);
61589 @@ -3364,6 +3574,34 @@ int handle_mm_fault(struct mm_struct *mm
61590 if (unlikely(is_vm_hugetlb_page(vma)))
61591 return hugetlb_fault(mm, vma, address, flags);
61592
61593 +#ifdef CONFIG_PAX_SEGMEXEC
61594 + vma_m = pax_find_mirror_vma(vma);
61595 + if (vma_m) {
61596 + unsigned long address_m;
61597 + pgd_t *pgd_m;
61598 + pud_t *pud_m;
61599 + pmd_t *pmd_m;
61600 +
61601 + if (vma->vm_start > vma_m->vm_start) {
61602 + address_m = address;
61603 + address -= SEGMEXEC_TASK_SIZE;
61604 + vma = vma_m;
61605 + } else
61606 + address_m = address + SEGMEXEC_TASK_SIZE;
61607 +
61608 + pgd_m = pgd_offset(mm, address_m);
61609 + pud_m = pud_alloc(mm, pgd_m, address_m);
61610 + if (!pud_m)
61611 + return VM_FAULT_OOM;
61612 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61613 + if (!pmd_m)
61614 + return VM_FAULT_OOM;
61615 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61616 + return VM_FAULT_OOM;
61617 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61618 + }
61619 +#endif
61620 +
61621 pgd = pgd_offset(mm, address);
61622 pud = pud_alloc(mm, pgd, address);
61623 if (!pud)
61624 @@ -3393,7 +3631,7 @@ int handle_mm_fault(struct mm_struct *mm
61625 * run pte_offset_map on the pmd, if an huge pmd could
61626 * materialize from under us from a different thread.
61627 */
61628 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61629 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61630 return VM_FAULT_OOM;
61631 /* if an huge pmd materialized from under us just retry later */
61632 if (unlikely(pmd_trans_huge(*pmd)))
61633 @@ -3497,7 +3735,7 @@ static int __init gate_vma_init(void)
61634 gate_vma.vm_start = FIXADDR_USER_START;
61635 gate_vma.vm_end = FIXADDR_USER_END;
61636 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61637 - gate_vma.vm_page_prot = __P101;
61638 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61639 /*
61640 * Make sure the vDSO gets into every core dump.
61641 * Dumping its contents makes post-mortem fully interpretable later
61642 diff -urNp linux-2.6.39.4/mm/memory-failure.c linux-2.6.39.4/mm/memory-failure.c
61643 --- linux-2.6.39.4/mm/memory-failure.c 2011-07-09 09:18:51.000000000 -0400
61644 +++ linux-2.6.39.4/mm/memory-failure.c 2011-08-05 19:44:37.000000000 -0400
61645 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61646
61647 int sysctl_memory_failure_recovery __read_mostly = 1;
61648
61649 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61650 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61651
61652 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61653
61654 @@ -1013,7 +1013,7 @@ int __memory_failure(unsigned long pfn,
61655 }
61656
61657 nr_pages = 1 << compound_trans_order(hpage);
61658 - atomic_long_add(nr_pages, &mce_bad_pages);
61659 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61660
61661 /*
61662 * We need/can do nothing about count=0 pages.
61663 @@ -1043,7 +1043,7 @@ int __memory_failure(unsigned long pfn,
61664 if (!PageHWPoison(hpage)
61665 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61666 || (p != hpage && TestSetPageHWPoison(hpage))) {
61667 - atomic_long_sub(nr_pages, &mce_bad_pages);
61668 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61669 return 0;
61670 }
61671 set_page_hwpoison_huge_page(hpage);
61672 @@ -1101,7 +1101,7 @@ int __memory_failure(unsigned long pfn,
61673 }
61674 if (hwpoison_filter(p)) {
61675 if (TestClearPageHWPoison(p))
61676 - atomic_long_sub(nr_pages, &mce_bad_pages);
61677 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61678 unlock_page(hpage);
61679 put_page(hpage);
61680 return 0;
61681 @@ -1227,7 +1227,7 @@ int unpoison_memory(unsigned long pfn)
61682 return 0;
61683 }
61684 if (TestClearPageHWPoison(p))
61685 - atomic_long_sub(nr_pages, &mce_bad_pages);
61686 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61687 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61688 return 0;
61689 }
61690 @@ -1241,7 +1241,7 @@ int unpoison_memory(unsigned long pfn)
61691 */
61692 if (TestClearPageHWPoison(page)) {
61693 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61694 - atomic_long_sub(nr_pages, &mce_bad_pages);
61695 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61696 freeit = 1;
61697 if (PageHuge(page))
61698 clear_page_hwpoison_huge_page(page);
61699 @@ -1354,7 +1354,7 @@ static int soft_offline_huge_page(struct
61700 }
61701 done:
61702 if (!PageHWPoison(hpage))
61703 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61704 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61705 set_page_hwpoison_huge_page(hpage);
61706 dequeue_hwpoisoned_huge_page(hpage);
61707 /* keep elevated page count for bad page */
61708 @@ -1484,7 +1484,7 @@ int soft_offline_page(struct page *page,
61709 return ret;
61710
61711 done:
61712 - atomic_long_add(1, &mce_bad_pages);
61713 + atomic_long_add_unchecked(1, &mce_bad_pages);
61714 SetPageHWPoison(page);
61715 /* keep elevated page count for bad page */
61716 return ret;
61717 diff -urNp linux-2.6.39.4/mm/mempolicy.c linux-2.6.39.4/mm/mempolicy.c
61718 --- linux-2.6.39.4/mm/mempolicy.c 2011-05-19 00:06:34.000000000 -0400
61719 +++ linux-2.6.39.4/mm/mempolicy.c 2011-08-05 19:44:37.000000000 -0400
61720 @@ -643,6 +643,10 @@ static int mbind_range(struct mm_struct
61721 unsigned long vmstart;
61722 unsigned long vmend;
61723
61724 +#ifdef CONFIG_PAX_SEGMEXEC
61725 + struct vm_area_struct *vma_m;
61726 +#endif
61727 +
61728 vma = find_vma_prev(mm, start, &prev);
61729 if (!vma || vma->vm_start > start)
61730 return -EFAULT;
61731 @@ -673,6 +677,16 @@ static int mbind_range(struct mm_struct
61732 err = policy_vma(vma, new_pol);
61733 if (err)
61734 goto out;
61735 +
61736 +#ifdef CONFIG_PAX_SEGMEXEC
61737 + vma_m = pax_find_mirror_vma(vma);
61738 + if (vma_m) {
61739 + err = policy_vma(vma_m, new_pol);
61740 + if (err)
61741 + goto out;
61742 + }
61743 +#endif
61744 +
61745 }
61746
61747 out:
61748 @@ -1106,6 +1120,17 @@ static long do_mbind(unsigned long start
61749
61750 if (end < start)
61751 return -EINVAL;
61752 +
61753 +#ifdef CONFIG_PAX_SEGMEXEC
61754 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61755 + if (end > SEGMEXEC_TASK_SIZE)
61756 + return -EINVAL;
61757 + } else
61758 +#endif
61759 +
61760 + if (end > TASK_SIZE)
61761 + return -EINVAL;
61762 +
61763 if (end == start)
61764 return 0;
61765
61766 @@ -1324,6 +1349,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61767 if (!mm)
61768 goto out;
61769
61770 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61771 + if (mm != current->mm &&
61772 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61773 + err = -EPERM;
61774 + goto out;
61775 + }
61776 +#endif
61777 +
61778 /*
61779 * Check if this process has the right to modify the specified
61780 * process. The right exists if the process has administrative
61781 @@ -1333,8 +1366,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61782 rcu_read_lock();
61783 tcred = __task_cred(task);
61784 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61785 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61786 - !capable(CAP_SYS_NICE)) {
61787 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61788 rcu_read_unlock();
61789 err = -EPERM;
61790 goto out;
61791 @@ -2634,7 +2666,7 @@ int show_numa_map(struct seq_file *m, vo
61792
61793 if (file) {
61794 seq_printf(m, " file=");
61795 - seq_path(m, &file->f_path, "\n\t= ");
61796 + seq_path(m, &file->f_path, "\n\t\\= ");
61797 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61798 seq_printf(m, " heap");
61799 } else if (vma->vm_start <= mm->start_stack &&
61800 diff -urNp linux-2.6.39.4/mm/migrate.c linux-2.6.39.4/mm/migrate.c
61801 --- linux-2.6.39.4/mm/migrate.c 2011-07-09 09:18:51.000000000 -0400
61802 +++ linux-2.6.39.4/mm/migrate.c 2011-08-05 19:44:37.000000000 -0400
61803 @@ -1133,6 +1133,8 @@ static int do_pages_move(struct mm_struc
61804 unsigned long chunk_start;
61805 int err;
61806
61807 + pax_track_stack();
61808 +
61809 task_nodes = cpuset_mems_allowed(task);
61810
61811 err = -ENOMEM;
61812 @@ -1317,6 +1319,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61813 if (!mm)
61814 return -EINVAL;
61815
61816 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61817 + if (mm != current->mm &&
61818 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61819 + err = -EPERM;
61820 + goto out;
61821 + }
61822 +#endif
61823 +
61824 /*
61825 * Check if this process has the right to modify the specified
61826 * process. The right exists if the process has administrative
61827 @@ -1326,8 +1336,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61828 rcu_read_lock();
61829 tcred = __task_cred(task);
61830 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61831 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61832 - !capable(CAP_SYS_NICE)) {
61833 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61834 rcu_read_unlock();
61835 err = -EPERM;
61836 goto out;
61837 diff -urNp linux-2.6.39.4/mm/mlock.c linux-2.6.39.4/mm/mlock.c
61838 --- linux-2.6.39.4/mm/mlock.c 2011-05-19 00:06:34.000000000 -0400
61839 +++ linux-2.6.39.4/mm/mlock.c 2011-08-05 19:44:37.000000000 -0400
61840 @@ -13,6 +13,7 @@
61841 #include <linux/pagemap.h>
61842 #include <linux/mempolicy.h>
61843 #include <linux/syscalls.h>
61844 +#include <linux/security.h>
61845 #include <linux/sched.h>
61846 #include <linux/module.h>
61847 #include <linux/rmap.h>
61848 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61849 return -EINVAL;
61850 if (end == start)
61851 return 0;
61852 + if (end > TASK_SIZE)
61853 + return -EINVAL;
61854 +
61855 vma = find_vma_prev(current->mm, start, &prev);
61856 if (!vma || vma->vm_start > start)
61857 return -ENOMEM;
61858 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61859 for (nstart = start ; ; ) {
61860 unsigned int newflags;
61861
61862 +#ifdef CONFIG_PAX_SEGMEXEC
61863 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61864 + break;
61865 +#endif
61866 +
61867 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61868
61869 newflags = vma->vm_flags | VM_LOCKED;
61870 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61871 lock_limit >>= PAGE_SHIFT;
61872
61873 /* check against resource limits */
61874 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61875 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61876 error = do_mlock(start, len, 1);
61877 up_write(&current->mm->mmap_sem);
61878 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61879 static int do_mlockall(int flags)
61880 {
61881 struct vm_area_struct * vma, * prev = NULL;
61882 - unsigned int def_flags = 0;
61883
61884 if (flags & MCL_FUTURE)
61885 - def_flags = VM_LOCKED;
61886 - current->mm->def_flags = def_flags;
61887 + current->mm->def_flags |= VM_LOCKED;
61888 + else
61889 + current->mm->def_flags &= ~VM_LOCKED;
61890 if (flags == MCL_FUTURE)
61891 goto out;
61892
61893 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61894 - unsigned int newflags;
61895 + unsigned long newflags;
61896 +
61897 +#ifdef CONFIG_PAX_SEGMEXEC
61898 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61899 + break;
61900 +#endif
61901
61902 + BUG_ON(vma->vm_end > TASK_SIZE);
61903 newflags = vma->vm_flags | VM_LOCKED;
61904 if (!(flags & MCL_CURRENT))
61905 newflags &= ~VM_LOCKED;
61906 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61907 lock_limit >>= PAGE_SHIFT;
61908
61909 ret = -ENOMEM;
61910 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
61911 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
61912 capable(CAP_IPC_LOCK))
61913 ret = do_mlockall(flags);
61914 diff -urNp linux-2.6.39.4/mm/mmap.c linux-2.6.39.4/mm/mmap.c
61915 --- linux-2.6.39.4/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
61916 +++ linux-2.6.39.4/mm/mmap.c 2011-08-05 20:34:06.000000000 -0400
61917 @@ -46,6 +46,16 @@
61918 #define arch_rebalance_pgtables(addr, len) (addr)
61919 #endif
61920
61921 +static inline void verify_mm_writelocked(struct mm_struct *mm)
61922 +{
61923 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
61924 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
61925 + up_read(&mm->mmap_sem);
61926 + BUG();
61927 + }
61928 +#endif
61929 +}
61930 +
61931 static void unmap_region(struct mm_struct *mm,
61932 struct vm_area_struct *vma, struct vm_area_struct *prev,
61933 unsigned long start, unsigned long end);
61934 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
61935 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
61936 *
61937 */
61938 -pgprot_t protection_map[16] = {
61939 +pgprot_t protection_map[16] __read_only = {
61940 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
61941 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
61942 };
61943
61944 pgprot_t vm_get_page_prot(unsigned long vm_flags)
61945 {
61946 - return __pgprot(pgprot_val(protection_map[vm_flags &
61947 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
61948 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
61949 pgprot_val(arch_vm_get_page_prot(vm_flags)));
61950 +
61951 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61952 + if (!(__supported_pte_mask & _PAGE_NX) &&
61953 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
61954 + (vm_flags & (VM_READ | VM_WRITE)))
61955 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
61956 +#endif
61957 +
61958 + return prot;
61959 }
61960 EXPORT_SYMBOL(vm_get_page_prot);
61961
61962 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
61963 int sysctl_overcommit_ratio = 50; /* default is 50% */
61964 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
61965 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
61966 struct percpu_counter vm_committed_as;
61967
61968 /*
61969 @@ -232,6 +252,7 @@ static struct vm_area_struct *remove_vma
61970 struct vm_area_struct *next = vma->vm_next;
61971
61972 might_sleep();
61973 + BUG_ON(vma->vm_mirror);
61974 if (vma->vm_ops && vma->vm_ops->close)
61975 vma->vm_ops->close(vma);
61976 if (vma->vm_file) {
61977 @@ -276,6 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
61978 * not page aligned -Ram Gupta
61979 */
61980 rlim = rlimit(RLIMIT_DATA);
61981 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
61982 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
61983 (mm->end_data - mm->start_data) > rlim)
61984 goto out;
61985 @@ -719,6 +741,12 @@ static int
61986 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
61987 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
61988 {
61989 +
61990 +#ifdef CONFIG_PAX_SEGMEXEC
61991 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
61992 + return 0;
61993 +#endif
61994 +
61995 if (is_mergeable_vma(vma, file, vm_flags) &&
61996 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
61997 if (vma->vm_pgoff == vm_pgoff)
61998 @@ -738,6 +766,12 @@ static int
61999 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
62000 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62001 {
62002 +
62003 +#ifdef CONFIG_PAX_SEGMEXEC
62004 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
62005 + return 0;
62006 +#endif
62007 +
62008 if (is_mergeable_vma(vma, file, vm_flags) &&
62009 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62010 pgoff_t vm_pglen;
62011 @@ -780,13 +814,20 @@ can_vma_merge_after(struct vm_area_struc
62012 struct vm_area_struct *vma_merge(struct mm_struct *mm,
62013 struct vm_area_struct *prev, unsigned long addr,
62014 unsigned long end, unsigned long vm_flags,
62015 - struct anon_vma *anon_vma, struct file *file,
62016 + struct anon_vma *anon_vma, struct file *file,
62017 pgoff_t pgoff, struct mempolicy *policy)
62018 {
62019 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
62020 struct vm_area_struct *area, *next;
62021 int err;
62022
62023 +#ifdef CONFIG_PAX_SEGMEXEC
62024 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
62025 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
62026 +
62027 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
62028 +#endif
62029 +
62030 /*
62031 * We later require that vma->vm_flags == vm_flags,
62032 * so this tests vma->vm_flags & VM_SPECIAL, too.
62033 @@ -802,6 +843,15 @@ struct vm_area_struct *vma_merge(struct
62034 if (next && next->vm_end == end) /* cases 6, 7, 8 */
62035 next = next->vm_next;
62036
62037 +#ifdef CONFIG_PAX_SEGMEXEC
62038 + if (prev)
62039 + prev_m = pax_find_mirror_vma(prev);
62040 + if (area)
62041 + area_m = pax_find_mirror_vma(area);
62042 + if (next)
62043 + next_m = pax_find_mirror_vma(next);
62044 +#endif
62045 +
62046 /*
62047 * Can it merge with the predecessor?
62048 */
62049 @@ -821,9 +871,24 @@ struct vm_area_struct *vma_merge(struct
62050 /* cases 1, 6 */
62051 err = vma_adjust(prev, prev->vm_start,
62052 next->vm_end, prev->vm_pgoff, NULL);
62053 - } else /* cases 2, 5, 7 */
62054 +
62055 +#ifdef CONFIG_PAX_SEGMEXEC
62056 + if (!err && prev_m)
62057 + err = vma_adjust(prev_m, prev_m->vm_start,
62058 + next_m->vm_end, prev_m->vm_pgoff, NULL);
62059 +#endif
62060 +
62061 + } else { /* cases 2, 5, 7 */
62062 err = vma_adjust(prev, prev->vm_start,
62063 end, prev->vm_pgoff, NULL);
62064 +
62065 +#ifdef CONFIG_PAX_SEGMEXEC
62066 + if (!err && prev_m)
62067 + err = vma_adjust(prev_m, prev_m->vm_start,
62068 + end_m, prev_m->vm_pgoff, NULL);
62069 +#endif
62070 +
62071 + }
62072 if (err)
62073 return NULL;
62074 khugepaged_enter_vma_merge(prev);
62075 @@ -837,12 +902,27 @@ struct vm_area_struct *vma_merge(struct
62076 mpol_equal(policy, vma_policy(next)) &&
62077 can_vma_merge_before(next, vm_flags,
62078 anon_vma, file, pgoff+pglen)) {
62079 - if (prev && addr < prev->vm_end) /* case 4 */
62080 + if (prev && addr < prev->vm_end) { /* case 4 */
62081 err = vma_adjust(prev, prev->vm_start,
62082 addr, prev->vm_pgoff, NULL);
62083 - else /* cases 3, 8 */
62084 +
62085 +#ifdef CONFIG_PAX_SEGMEXEC
62086 + if (!err && prev_m)
62087 + err = vma_adjust(prev_m, prev_m->vm_start,
62088 + addr_m, prev_m->vm_pgoff, NULL);
62089 +#endif
62090 +
62091 + } else { /* cases 3, 8 */
62092 err = vma_adjust(area, addr, next->vm_end,
62093 next->vm_pgoff - pglen, NULL);
62094 +
62095 +#ifdef CONFIG_PAX_SEGMEXEC
62096 + if (!err && area_m)
62097 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
62098 + next_m->vm_pgoff - pglen, NULL);
62099 +#endif
62100 +
62101 + }
62102 if (err)
62103 return NULL;
62104 khugepaged_enter_vma_merge(area);
62105 @@ -958,14 +1038,11 @@ none:
62106 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
62107 struct file *file, long pages)
62108 {
62109 - const unsigned long stack_flags
62110 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
62111 -
62112 if (file) {
62113 mm->shared_vm += pages;
62114 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
62115 mm->exec_vm += pages;
62116 - } else if (flags & stack_flags)
62117 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
62118 mm->stack_vm += pages;
62119 if (flags & (VM_RESERVED|VM_IO))
62120 mm->reserved_vm += pages;
62121 @@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
62122 * (the exception is when the underlying filesystem is noexec
62123 * mounted, in which case we dont add PROT_EXEC.)
62124 */
62125 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
62126 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
62127 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
62128 prot |= PROT_EXEC;
62129
62130 @@ -1018,7 +1095,7 @@ unsigned long do_mmap_pgoff(struct file
62131 /* Obtain the address to map to. we verify (or select) it and ensure
62132 * that it represents a valid section of the address space.
62133 */
62134 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
62135 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
62136 if (addr & ~PAGE_MASK)
62137 return addr;
62138
62139 @@ -1029,6 +1106,36 @@ unsigned long do_mmap_pgoff(struct file
62140 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
62141 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
62142
62143 +#ifdef CONFIG_PAX_MPROTECT
62144 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62145 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62146 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
62147 + gr_log_rwxmmap(file);
62148 +
62149 +#ifdef CONFIG_PAX_EMUPLT
62150 + vm_flags &= ~VM_EXEC;
62151 +#else
62152 + return -EPERM;
62153 +#endif
62154 +
62155 + }
62156 +
62157 + if (!(vm_flags & VM_EXEC))
62158 + vm_flags &= ~VM_MAYEXEC;
62159 +#else
62160 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62161 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62162 +#endif
62163 + else
62164 + vm_flags &= ~VM_MAYWRITE;
62165 + }
62166 +#endif
62167 +
62168 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62169 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
62170 + vm_flags &= ~VM_PAGEEXEC;
62171 +#endif
62172 +
62173 if (flags & MAP_LOCKED)
62174 if (!can_do_mlock())
62175 return -EPERM;
62176 @@ -1040,6 +1147,7 @@ unsigned long do_mmap_pgoff(struct file
62177 locked += mm->locked_vm;
62178 lock_limit = rlimit(RLIMIT_MEMLOCK);
62179 lock_limit >>= PAGE_SHIFT;
62180 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62181 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
62182 return -EAGAIN;
62183 }
62184 @@ -1110,6 +1218,9 @@ unsigned long do_mmap_pgoff(struct file
62185 if (error)
62186 return error;
62187
62188 + if (!gr_acl_handle_mmap(file, prot))
62189 + return -EACCES;
62190 +
62191 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
62192 }
62193 EXPORT_SYMBOL(do_mmap_pgoff);
62194 @@ -1187,10 +1298,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
62195 */
62196 int vma_wants_writenotify(struct vm_area_struct *vma)
62197 {
62198 - unsigned int vm_flags = vma->vm_flags;
62199 + unsigned long vm_flags = vma->vm_flags;
62200
62201 /* If it was private or non-writable, the write bit is already clear */
62202 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
62203 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
62204 return 0;
62205
62206 /* The backer wishes to know when pages are first written to? */
62207 @@ -1239,14 +1350,24 @@ unsigned long mmap_region(struct file *f
62208 unsigned long charged = 0;
62209 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
62210
62211 +#ifdef CONFIG_PAX_SEGMEXEC
62212 + struct vm_area_struct *vma_m = NULL;
62213 +#endif
62214 +
62215 + /*
62216 + * mm->mmap_sem is required to protect against another thread
62217 + * changing the mappings in case we sleep.
62218 + */
62219 + verify_mm_writelocked(mm);
62220 +
62221 /* Clear old maps */
62222 error = -ENOMEM;
62223 -munmap_back:
62224 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62225 if (vma && vma->vm_start < addr + len) {
62226 if (do_munmap(mm, addr, len))
62227 return -ENOMEM;
62228 - goto munmap_back;
62229 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62230 + BUG_ON(vma && vma->vm_start < addr + len);
62231 }
62232
62233 /* Check against address space limit. */
62234 @@ -1295,6 +1416,16 @@ munmap_back:
62235 goto unacct_error;
62236 }
62237
62238 +#ifdef CONFIG_PAX_SEGMEXEC
62239 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
62240 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62241 + if (!vma_m) {
62242 + error = -ENOMEM;
62243 + goto free_vma;
62244 + }
62245 + }
62246 +#endif
62247 +
62248 vma->vm_mm = mm;
62249 vma->vm_start = addr;
62250 vma->vm_end = addr + len;
62251 @@ -1318,6 +1449,19 @@ munmap_back:
62252 error = file->f_op->mmap(file, vma);
62253 if (error)
62254 goto unmap_and_free_vma;
62255 +
62256 +#ifdef CONFIG_PAX_SEGMEXEC
62257 + if (vma_m && (vm_flags & VM_EXECUTABLE))
62258 + added_exe_file_vma(mm);
62259 +#endif
62260 +
62261 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62262 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62263 + vma->vm_flags |= VM_PAGEEXEC;
62264 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62265 + }
62266 +#endif
62267 +
62268 if (vm_flags & VM_EXECUTABLE)
62269 added_exe_file_vma(mm);
62270
62271 @@ -1353,6 +1497,11 @@ munmap_back:
62272 vma_link(mm, vma, prev, rb_link, rb_parent);
62273 file = vma->vm_file;
62274
62275 +#ifdef CONFIG_PAX_SEGMEXEC
62276 + if (vma_m)
62277 + BUG_ON(pax_mirror_vma(vma_m, vma));
62278 +#endif
62279 +
62280 /* Once vma denies write, undo our temporary denial count */
62281 if (correct_wcount)
62282 atomic_inc(&inode->i_writecount);
62283 @@ -1361,6 +1510,7 @@ out:
62284
62285 mm->total_vm += len >> PAGE_SHIFT;
62286 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62287 + track_exec_limit(mm, addr, addr + len, vm_flags);
62288 if (vm_flags & VM_LOCKED) {
62289 if (!mlock_vma_pages_range(vma, addr, addr + len))
62290 mm->locked_vm += (len >> PAGE_SHIFT);
62291 @@ -1378,6 +1528,12 @@ unmap_and_free_vma:
62292 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62293 charged = 0;
62294 free_vma:
62295 +
62296 +#ifdef CONFIG_PAX_SEGMEXEC
62297 + if (vma_m)
62298 + kmem_cache_free(vm_area_cachep, vma_m);
62299 +#endif
62300 +
62301 kmem_cache_free(vm_area_cachep, vma);
62302 unacct_error:
62303 if (charged)
62304 @@ -1385,6 +1541,44 @@ unacct_error:
62305 return error;
62306 }
62307
62308 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62309 +{
62310 + if (!vma) {
62311 +#ifdef CONFIG_STACK_GROWSUP
62312 + if (addr > sysctl_heap_stack_gap)
62313 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62314 + else
62315 + vma = find_vma(current->mm, 0);
62316 + if (vma && (vma->vm_flags & VM_GROWSUP))
62317 + return false;
62318 +#endif
62319 + return true;
62320 + }
62321 +
62322 + if (addr + len > vma->vm_start)
62323 + return false;
62324 +
62325 + if (vma->vm_flags & VM_GROWSDOWN)
62326 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62327 +#ifdef CONFIG_STACK_GROWSUP
62328 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62329 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62330 +#endif
62331 +
62332 + return true;
62333 +}
62334 +
62335 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62336 +{
62337 + if (vma->vm_start < len)
62338 + return -ENOMEM;
62339 + if (!(vma->vm_flags & VM_GROWSDOWN))
62340 + return vma->vm_start - len;
62341 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
62342 + return vma->vm_start - len - sysctl_heap_stack_gap;
62343 + return -ENOMEM;
62344 +}
62345 +
62346 /* Get an address range which is currently unmapped.
62347 * For shmat() with addr=0.
62348 *
62349 @@ -1411,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp
62350 if (flags & MAP_FIXED)
62351 return addr;
62352
62353 +#ifdef CONFIG_PAX_RANDMMAP
62354 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62355 +#endif
62356 +
62357 if (addr) {
62358 addr = PAGE_ALIGN(addr);
62359 - vma = find_vma(mm, addr);
62360 - if (TASK_SIZE - len >= addr &&
62361 - (!vma || addr + len <= vma->vm_start))
62362 - return addr;
62363 + if (TASK_SIZE - len >= addr) {
62364 + vma = find_vma(mm, addr);
62365 + if (check_heap_stack_gap(vma, addr, len))
62366 + return addr;
62367 + }
62368 }
62369 if (len > mm->cached_hole_size) {
62370 - start_addr = addr = mm->free_area_cache;
62371 + start_addr = addr = mm->free_area_cache;
62372 } else {
62373 - start_addr = addr = TASK_UNMAPPED_BASE;
62374 - mm->cached_hole_size = 0;
62375 + start_addr = addr = mm->mmap_base;
62376 + mm->cached_hole_size = 0;
62377 }
62378
62379 full_search:
62380 @@ -1433,34 +1632,40 @@ full_search:
62381 * Start a new search - just in case we missed
62382 * some holes.
62383 */
62384 - if (start_addr != TASK_UNMAPPED_BASE) {
62385 - addr = TASK_UNMAPPED_BASE;
62386 - start_addr = addr;
62387 + if (start_addr != mm->mmap_base) {
62388 + start_addr = addr = mm->mmap_base;
62389 mm->cached_hole_size = 0;
62390 goto full_search;
62391 }
62392 return -ENOMEM;
62393 }
62394 - if (!vma || addr + len <= vma->vm_start) {
62395 - /*
62396 - * Remember the place where we stopped the search:
62397 - */
62398 - mm->free_area_cache = addr + len;
62399 - return addr;
62400 - }
62401 + if (check_heap_stack_gap(vma, addr, len))
62402 + break;
62403 if (addr + mm->cached_hole_size < vma->vm_start)
62404 mm->cached_hole_size = vma->vm_start - addr;
62405 addr = vma->vm_end;
62406 }
62407 +
62408 + /*
62409 + * Remember the place where we stopped the search:
62410 + */
62411 + mm->free_area_cache = addr + len;
62412 + return addr;
62413 }
62414 #endif
62415
62416 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62417 {
62418 +
62419 +#ifdef CONFIG_PAX_SEGMEXEC
62420 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62421 + return;
62422 +#endif
62423 +
62424 /*
62425 * Is this a new hole at the lowest possible address?
62426 */
62427 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62428 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62429 mm->free_area_cache = addr;
62430 mm->cached_hole_size = ~0UL;
62431 }
62432 @@ -1478,7 +1683,7 @@ arch_get_unmapped_area_topdown(struct fi
62433 {
62434 struct vm_area_struct *vma;
62435 struct mm_struct *mm = current->mm;
62436 - unsigned long addr = addr0;
62437 + unsigned long base = mm->mmap_base, addr = addr0;
62438
62439 /* requested length too big for entire address space */
62440 if (len > TASK_SIZE)
62441 @@ -1487,13 +1692,18 @@ arch_get_unmapped_area_topdown(struct fi
62442 if (flags & MAP_FIXED)
62443 return addr;
62444
62445 +#ifdef CONFIG_PAX_RANDMMAP
62446 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62447 +#endif
62448 +
62449 /* requesting a specific address */
62450 if (addr) {
62451 addr = PAGE_ALIGN(addr);
62452 - vma = find_vma(mm, addr);
62453 - if (TASK_SIZE - len >= addr &&
62454 - (!vma || addr + len <= vma->vm_start))
62455 - return addr;
62456 + if (TASK_SIZE - len >= addr) {
62457 + vma = find_vma(mm, addr);
62458 + if (check_heap_stack_gap(vma, addr, len))
62459 + return addr;
62460 + }
62461 }
62462
62463 /* check if free_area_cache is useful for us */
62464 @@ -1508,7 +1718,7 @@ arch_get_unmapped_area_topdown(struct fi
62465 /* make sure it can fit in the remaining address space */
62466 if (addr > len) {
62467 vma = find_vma(mm, addr-len);
62468 - if (!vma || addr <= vma->vm_start)
62469 + if (check_heap_stack_gap(vma, addr - len, len))
62470 /* remember the address as a hint for next time */
62471 return (mm->free_area_cache = addr-len);
62472 }
62473 @@ -1525,7 +1735,7 @@ arch_get_unmapped_area_topdown(struct fi
62474 * return with success:
62475 */
62476 vma = find_vma(mm, addr);
62477 - if (!vma || addr+len <= vma->vm_start)
62478 + if (check_heap_stack_gap(vma, addr, len))
62479 /* remember the address as a hint for next time */
62480 return (mm->free_area_cache = addr);
62481
62482 @@ -1534,8 +1744,8 @@ arch_get_unmapped_area_topdown(struct fi
62483 mm->cached_hole_size = vma->vm_start - addr;
62484
62485 /* try just below the current vma->vm_start */
62486 - addr = vma->vm_start-len;
62487 - } while (len < vma->vm_start);
62488 + addr = skip_heap_stack_gap(vma, len);
62489 + } while (!IS_ERR_VALUE(addr));
62490
62491 bottomup:
62492 /*
62493 @@ -1544,13 +1754,21 @@ bottomup:
62494 * can happen with large stack limits and large mmap()
62495 * allocations.
62496 */
62497 + mm->mmap_base = TASK_UNMAPPED_BASE;
62498 +
62499 +#ifdef CONFIG_PAX_RANDMMAP
62500 + if (mm->pax_flags & MF_PAX_RANDMMAP)
62501 + mm->mmap_base += mm->delta_mmap;
62502 +#endif
62503 +
62504 + mm->free_area_cache = mm->mmap_base;
62505 mm->cached_hole_size = ~0UL;
62506 - mm->free_area_cache = TASK_UNMAPPED_BASE;
62507 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62508 /*
62509 * Restore the topdown base:
62510 */
62511 - mm->free_area_cache = mm->mmap_base;
62512 + mm->mmap_base = base;
62513 + mm->free_area_cache = base;
62514 mm->cached_hole_size = ~0UL;
62515
62516 return addr;
62517 @@ -1559,6 +1777,12 @@ bottomup:
62518
62519 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62520 {
62521 +
62522 +#ifdef CONFIG_PAX_SEGMEXEC
62523 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62524 + return;
62525 +#endif
62526 +
62527 /*
62528 * Is this a new hole at the highest possible address?
62529 */
62530 @@ -1566,8 +1790,10 @@ void arch_unmap_area_topdown(struct mm_s
62531 mm->free_area_cache = addr;
62532
62533 /* dont allow allocations above current base */
62534 - if (mm->free_area_cache > mm->mmap_base)
62535 + if (mm->free_area_cache > mm->mmap_base) {
62536 mm->free_area_cache = mm->mmap_base;
62537 + mm->cached_hole_size = ~0UL;
62538 + }
62539 }
62540
62541 unsigned long
62542 @@ -1675,6 +1901,28 @@ out:
62543 return prev ? prev->vm_next : vma;
62544 }
62545
62546 +#ifdef CONFIG_PAX_SEGMEXEC
62547 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62548 +{
62549 + struct vm_area_struct *vma_m;
62550 +
62551 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62552 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62553 + BUG_ON(vma->vm_mirror);
62554 + return NULL;
62555 + }
62556 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62557 + vma_m = vma->vm_mirror;
62558 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62559 + BUG_ON(vma->vm_file != vma_m->vm_file);
62560 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62561 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62562 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62563 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62564 + return vma_m;
62565 +}
62566 +#endif
62567 +
62568 /*
62569 * Verify that the stack growth is acceptable and
62570 * update accounting. This is shared with both the
62571 @@ -1691,6 +1939,7 @@ static int acct_stack_growth(struct vm_a
62572 return -ENOMEM;
62573
62574 /* Stack limit test */
62575 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62576 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62577 return -ENOMEM;
62578
62579 @@ -1701,6 +1950,7 @@ static int acct_stack_growth(struct vm_a
62580 locked = mm->locked_vm + grow;
62581 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62582 limit >>= PAGE_SHIFT;
62583 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62584 if (locked > limit && !capable(CAP_IPC_LOCK))
62585 return -ENOMEM;
62586 }
62587 @@ -1731,37 +1981,48 @@ static int acct_stack_growth(struct vm_a
62588 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62589 * vma is the last one with address > vma->vm_end. Have to extend vma.
62590 */
62591 +#ifndef CONFIG_IA64
62592 +static
62593 +#endif
62594 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62595 {
62596 int error;
62597 + bool locknext;
62598
62599 if (!(vma->vm_flags & VM_GROWSUP))
62600 return -EFAULT;
62601
62602 + /* Also guard against wrapping around to address 0. */
62603 + if (address < PAGE_ALIGN(address+1))
62604 + address = PAGE_ALIGN(address+1);
62605 + else
62606 + return -ENOMEM;
62607 +
62608 /*
62609 * We must make sure the anon_vma is allocated
62610 * so that the anon_vma locking is not a noop.
62611 */
62612 if (unlikely(anon_vma_prepare(vma)))
62613 return -ENOMEM;
62614 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62615 + if (locknext && anon_vma_prepare(vma->vm_next))
62616 + return -ENOMEM;
62617 vma_lock_anon_vma(vma);
62618 + if (locknext)
62619 + vma_lock_anon_vma(vma->vm_next);
62620
62621 /*
62622 * vma->vm_start/vm_end cannot change under us because the caller
62623 * is required to hold the mmap_sem in read mode. We need the
62624 - * anon_vma lock to serialize against concurrent expand_stacks.
62625 - * Also guard against wrapping around to address 0.
62626 + * anon_vma locks to serialize against concurrent expand_stacks
62627 + * and expand_upwards.
62628 */
62629 - if (address < PAGE_ALIGN(address+4))
62630 - address = PAGE_ALIGN(address+4);
62631 - else {
62632 - vma_unlock_anon_vma(vma);
62633 - return -ENOMEM;
62634 - }
62635 error = 0;
62636
62637 /* Somebody else might have raced and expanded it already */
62638 - if (address > vma->vm_end) {
62639 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62640 + error = -ENOMEM;
62641 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62642 unsigned long size, grow;
62643
62644 size = address - vma->vm_start;
62645 @@ -1776,6 +2037,8 @@ int expand_upwards(struct vm_area_struct
62646 }
62647 }
62648 }
62649 + if (locknext)
62650 + vma_unlock_anon_vma(vma->vm_next);
62651 vma_unlock_anon_vma(vma);
62652 khugepaged_enter_vma_merge(vma);
62653 return error;
62654 @@ -1789,6 +2052,8 @@ static int expand_downwards(struct vm_ar
62655 unsigned long address)
62656 {
62657 int error;
62658 + bool lockprev = false;
62659 + struct vm_area_struct *prev;
62660
62661 /*
62662 * We must make sure the anon_vma is allocated
62663 @@ -1802,6 +2067,15 @@ static int expand_downwards(struct vm_ar
62664 if (error)
62665 return error;
62666
62667 + prev = vma->vm_prev;
62668 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62669 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62670 +#endif
62671 + if (lockprev && anon_vma_prepare(prev))
62672 + return -ENOMEM;
62673 + if (lockprev)
62674 + vma_lock_anon_vma(prev);
62675 +
62676 vma_lock_anon_vma(vma);
62677
62678 /*
62679 @@ -1811,9 +2085,17 @@ static int expand_downwards(struct vm_ar
62680 */
62681
62682 /* Somebody else might have raced and expanded it already */
62683 - if (address < vma->vm_start) {
62684 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62685 + error = -ENOMEM;
62686 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62687 unsigned long size, grow;
62688
62689 +#ifdef CONFIG_PAX_SEGMEXEC
62690 + struct vm_area_struct *vma_m;
62691 +
62692 + vma_m = pax_find_mirror_vma(vma);
62693 +#endif
62694 +
62695 size = vma->vm_end - address;
62696 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62697
62698 @@ -1823,11 +2105,22 @@ static int expand_downwards(struct vm_ar
62699 if (!error) {
62700 vma->vm_start = address;
62701 vma->vm_pgoff -= grow;
62702 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62703 +
62704 +#ifdef CONFIG_PAX_SEGMEXEC
62705 + if (vma_m) {
62706 + vma_m->vm_start -= grow << PAGE_SHIFT;
62707 + vma_m->vm_pgoff -= grow;
62708 + }
62709 +#endif
62710 +
62711 perf_event_mmap(vma);
62712 }
62713 }
62714 }
62715 vma_unlock_anon_vma(vma);
62716 + if (lockprev)
62717 + vma_unlock_anon_vma(prev);
62718 khugepaged_enter_vma_merge(vma);
62719 return error;
62720 }
62721 @@ -1902,6 +2195,13 @@ static void remove_vma_list(struct mm_st
62722 do {
62723 long nrpages = vma_pages(vma);
62724
62725 +#ifdef CONFIG_PAX_SEGMEXEC
62726 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62727 + vma = remove_vma(vma);
62728 + continue;
62729 + }
62730 +#endif
62731 +
62732 mm->total_vm -= nrpages;
62733 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62734 vma = remove_vma(vma);
62735 @@ -1947,6 +2247,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62736 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62737 vma->vm_prev = NULL;
62738 do {
62739 +
62740 +#ifdef CONFIG_PAX_SEGMEXEC
62741 + if (vma->vm_mirror) {
62742 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62743 + vma->vm_mirror->vm_mirror = NULL;
62744 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62745 + vma->vm_mirror = NULL;
62746 + }
62747 +#endif
62748 +
62749 rb_erase(&vma->vm_rb, &mm->mm_rb);
62750 mm->map_count--;
62751 tail_vma = vma;
62752 @@ -1975,14 +2285,33 @@ static int __split_vma(struct mm_struct
62753 struct vm_area_struct *new;
62754 int err = -ENOMEM;
62755
62756 +#ifdef CONFIG_PAX_SEGMEXEC
62757 + struct vm_area_struct *vma_m, *new_m = NULL;
62758 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62759 +#endif
62760 +
62761 if (is_vm_hugetlb_page(vma) && (addr &
62762 ~(huge_page_mask(hstate_vma(vma)))))
62763 return -EINVAL;
62764
62765 +#ifdef CONFIG_PAX_SEGMEXEC
62766 + vma_m = pax_find_mirror_vma(vma);
62767 +#endif
62768 +
62769 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62770 if (!new)
62771 goto out_err;
62772
62773 +#ifdef CONFIG_PAX_SEGMEXEC
62774 + if (vma_m) {
62775 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62776 + if (!new_m) {
62777 + kmem_cache_free(vm_area_cachep, new);
62778 + goto out_err;
62779 + }
62780 + }
62781 +#endif
62782 +
62783 /* most fields are the same, copy all, and then fixup */
62784 *new = *vma;
62785
62786 @@ -1995,6 +2324,22 @@ static int __split_vma(struct mm_struct
62787 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62788 }
62789
62790 +#ifdef CONFIG_PAX_SEGMEXEC
62791 + if (vma_m) {
62792 + *new_m = *vma_m;
62793 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62794 + new_m->vm_mirror = new;
62795 + new->vm_mirror = new_m;
62796 +
62797 + if (new_below)
62798 + new_m->vm_end = addr_m;
62799 + else {
62800 + new_m->vm_start = addr_m;
62801 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62802 + }
62803 + }
62804 +#endif
62805 +
62806 pol = mpol_dup(vma_policy(vma));
62807 if (IS_ERR(pol)) {
62808 err = PTR_ERR(pol);
62809 @@ -2020,6 +2365,42 @@ static int __split_vma(struct mm_struct
62810 else
62811 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62812
62813 +#ifdef CONFIG_PAX_SEGMEXEC
62814 + if (!err && vma_m) {
62815 + if (anon_vma_clone(new_m, vma_m))
62816 + goto out_free_mpol;
62817 +
62818 + mpol_get(pol);
62819 + vma_set_policy(new_m, pol);
62820 +
62821 + if (new_m->vm_file) {
62822 + get_file(new_m->vm_file);
62823 + if (vma_m->vm_flags & VM_EXECUTABLE)
62824 + added_exe_file_vma(mm);
62825 + }
62826 +
62827 + if (new_m->vm_ops && new_m->vm_ops->open)
62828 + new_m->vm_ops->open(new_m);
62829 +
62830 + if (new_below)
62831 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62832 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62833 + else
62834 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62835 +
62836 + if (err) {
62837 + if (new_m->vm_ops && new_m->vm_ops->close)
62838 + new_m->vm_ops->close(new_m);
62839 + if (new_m->vm_file) {
62840 + if (vma_m->vm_flags & VM_EXECUTABLE)
62841 + removed_exe_file_vma(mm);
62842 + fput(new_m->vm_file);
62843 + }
62844 + mpol_put(pol);
62845 + }
62846 + }
62847 +#endif
62848 +
62849 /* Success. */
62850 if (!err)
62851 return 0;
62852 @@ -2032,10 +2413,18 @@ static int __split_vma(struct mm_struct
62853 removed_exe_file_vma(mm);
62854 fput(new->vm_file);
62855 }
62856 - unlink_anon_vmas(new);
62857 out_free_mpol:
62858 mpol_put(pol);
62859 out_free_vma:
62860 +
62861 +#ifdef CONFIG_PAX_SEGMEXEC
62862 + if (new_m) {
62863 + unlink_anon_vmas(new_m);
62864 + kmem_cache_free(vm_area_cachep, new_m);
62865 + }
62866 +#endif
62867 +
62868 + unlink_anon_vmas(new);
62869 kmem_cache_free(vm_area_cachep, new);
62870 out_err:
62871 return err;
62872 @@ -2048,6 +2437,15 @@ static int __split_vma(struct mm_struct
62873 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62874 unsigned long addr, int new_below)
62875 {
62876 +
62877 +#ifdef CONFIG_PAX_SEGMEXEC
62878 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62879 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62880 + if (mm->map_count >= sysctl_max_map_count-1)
62881 + return -ENOMEM;
62882 + } else
62883 +#endif
62884 +
62885 if (mm->map_count >= sysctl_max_map_count)
62886 return -ENOMEM;
62887
62888 @@ -2059,11 +2457,30 @@ int split_vma(struct mm_struct *mm, stru
62889 * work. This now handles partial unmappings.
62890 * Jeremy Fitzhardinge <jeremy@goop.org>
62891 */
62892 +#ifdef CONFIG_PAX_SEGMEXEC
62893 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62894 {
62895 + int ret = __do_munmap(mm, start, len);
62896 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62897 + return ret;
62898 +
62899 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62900 +}
62901 +
62902 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62903 +#else
62904 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62905 +#endif
62906 +{
62907 unsigned long end;
62908 struct vm_area_struct *vma, *prev, *last;
62909
62910 + /*
62911 + * mm->mmap_sem is required to protect against another thread
62912 + * changing the mappings in case we sleep.
62913 + */
62914 + verify_mm_writelocked(mm);
62915 +
62916 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
62917 return -EINVAL;
62918
62919 @@ -2137,6 +2554,8 @@ int do_munmap(struct mm_struct *mm, unsi
62920 /* Fix up all other VM information */
62921 remove_vma_list(mm, vma);
62922
62923 + track_exec_limit(mm, start, end, 0UL);
62924 +
62925 return 0;
62926 }
62927
62928 @@ -2149,22 +2568,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
62929
62930 profile_munmap(addr);
62931
62932 +#ifdef CONFIG_PAX_SEGMEXEC
62933 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
62934 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
62935 + return -EINVAL;
62936 +#endif
62937 +
62938 down_write(&mm->mmap_sem);
62939 ret = do_munmap(mm, addr, len);
62940 up_write(&mm->mmap_sem);
62941 return ret;
62942 }
62943
62944 -static inline void verify_mm_writelocked(struct mm_struct *mm)
62945 -{
62946 -#ifdef CONFIG_DEBUG_VM
62947 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62948 - WARN_ON(1);
62949 - up_read(&mm->mmap_sem);
62950 - }
62951 -#endif
62952 -}
62953 -
62954 /*
62955 * this is really a simplified "do_mmap". it only handles
62956 * anonymous maps. eventually we may be able to do some
62957 @@ -2178,6 +2593,7 @@ unsigned long do_brk(unsigned long addr,
62958 struct rb_node ** rb_link, * rb_parent;
62959 pgoff_t pgoff = addr >> PAGE_SHIFT;
62960 int error;
62961 + unsigned long charged;
62962
62963 len = PAGE_ALIGN(len);
62964 if (!len)
62965 @@ -2189,16 +2605,30 @@ unsigned long do_brk(unsigned long addr,
62966
62967 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
62968
62969 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62970 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
62971 + flags &= ~VM_EXEC;
62972 +
62973 +#ifdef CONFIG_PAX_MPROTECT
62974 + if (mm->pax_flags & MF_PAX_MPROTECT)
62975 + flags &= ~VM_MAYEXEC;
62976 +#endif
62977 +
62978 + }
62979 +#endif
62980 +
62981 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
62982 if (error & ~PAGE_MASK)
62983 return error;
62984
62985 + charged = len >> PAGE_SHIFT;
62986 +
62987 /*
62988 * mlock MCL_FUTURE?
62989 */
62990 if (mm->def_flags & VM_LOCKED) {
62991 unsigned long locked, lock_limit;
62992 - locked = len >> PAGE_SHIFT;
62993 + locked = charged;
62994 locked += mm->locked_vm;
62995 lock_limit = rlimit(RLIMIT_MEMLOCK);
62996 lock_limit >>= PAGE_SHIFT;
62997 @@ -2215,22 +2645,22 @@ unsigned long do_brk(unsigned long addr,
62998 /*
62999 * Clear old maps. this also does some error checking for us
63000 */
63001 - munmap_back:
63002 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63003 if (vma && vma->vm_start < addr + len) {
63004 if (do_munmap(mm, addr, len))
63005 return -ENOMEM;
63006 - goto munmap_back;
63007 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63008 + BUG_ON(vma && vma->vm_start < addr + len);
63009 }
63010
63011 /* Check against address space limits *after* clearing old maps... */
63012 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
63013 + if (!may_expand_vm(mm, charged))
63014 return -ENOMEM;
63015
63016 if (mm->map_count > sysctl_max_map_count)
63017 return -ENOMEM;
63018
63019 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
63020 + if (security_vm_enough_memory(charged))
63021 return -ENOMEM;
63022
63023 /* Can we just expand an old private anonymous mapping? */
63024 @@ -2244,7 +2674,7 @@ unsigned long do_brk(unsigned long addr,
63025 */
63026 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63027 if (!vma) {
63028 - vm_unacct_memory(len >> PAGE_SHIFT);
63029 + vm_unacct_memory(charged);
63030 return -ENOMEM;
63031 }
63032
63033 @@ -2258,11 +2688,12 @@ unsigned long do_brk(unsigned long addr,
63034 vma_link(mm, vma, prev, rb_link, rb_parent);
63035 out:
63036 perf_event_mmap(vma);
63037 - mm->total_vm += len >> PAGE_SHIFT;
63038 + mm->total_vm += charged;
63039 if (flags & VM_LOCKED) {
63040 if (!mlock_vma_pages_range(vma, addr, addr + len))
63041 - mm->locked_vm += (len >> PAGE_SHIFT);
63042 + mm->locked_vm += charged;
63043 }
63044 + track_exec_limit(mm, addr, addr + len, flags);
63045 return addr;
63046 }
63047
63048 @@ -2309,8 +2740,10 @@ void exit_mmap(struct mm_struct *mm)
63049 * Walk the list again, actually closing and freeing it,
63050 * with preemption enabled, without holding any MM locks.
63051 */
63052 - while (vma)
63053 + while (vma) {
63054 + vma->vm_mirror = NULL;
63055 vma = remove_vma(vma);
63056 + }
63057
63058 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
63059 }
63060 @@ -2324,6 +2757,13 @@ int insert_vm_struct(struct mm_struct *
63061 struct vm_area_struct * __vma, * prev;
63062 struct rb_node ** rb_link, * rb_parent;
63063
63064 +#ifdef CONFIG_PAX_SEGMEXEC
63065 + struct vm_area_struct *vma_m = NULL;
63066 +#endif
63067 +
63068 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
63069 + return -EPERM;
63070 +
63071 /*
63072 * The vm_pgoff of a purely anonymous vma should be irrelevant
63073 * until its first write fault, when page's anon_vma and index
63074 @@ -2346,7 +2786,22 @@ int insert_vm_struct(struct mm_struct *
63075 if ((vma->vm_flags & VM_ACCOUNT) &&
63076 security_vm_enough_memory_mm(mm, vma_pages(vma)))
63077 return -ENOMEM;
63078 +
63079 +#ifdef CONFIG_PAX_SEGMEXEC
63080 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
63081 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63082 + if (!vma_m)
63083 + return -ENOMEM;
63084 + }
63085 +#endif
63086 +
63087 vma_link(mm, vma, prev, rb_link, rb_parent);
63088 +
63089 +#ifdef CONFIG_PAX_SEGMEXEC
63090 + if (vma_m)
63091 + BUG_ON(pax_mirror_vma(vma_m, vma));
63092 +#endif
63093 +
63094 return 0;
63095 }
63096
63097 @@ -2364,6 +2819,8 @@ struct vm_area_struct *copy_vma(struct v
63098 struct rb_node **rb_link, *rb_parent;
63099 struct mempolicy *pol;
63100
63101 + BUG_ON(vma->vm_mirror);
63102 +
63103 /*
63104 * If anonymous vma has not yet been faulted, update new pgoff
63105 * to match new location, to increase its chance of merging.
63106 @@ -2414,6 +2871,39 @@ struct vm_area_struct *copy_vma(struct v
63107 return NULL;
63108 }
63109
63110 +#ifdef CONFIG_PAX_SEGMEXEC
63111 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
63112 +{
63113 + struct vm_area_struct *prev_m;
63114 + struct rb_node **rb_link_m, *rb_parent_m;
63115 + struct mempolicy *pol_m;
63116 +
63117 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
63118 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
63119 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
63120 + *vma_m = *vma;
63121 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
63122 + if (anon_vma_clone(vma_m, vma))
63123 + return -ENOMEM;
63124 + pol_m = vma_policy(vma_m);
63125 + mpol_get(pol_m);
63126 + vma_set_policy(vma_m, pol_m);
63127 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
63128 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
63129 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
63130 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
63131 + if (vma_m->vm_file)
63132 + get_file(vma_m->vm_file);
63133 + if (vma_m->vm_ops && vma_m->vm_ops->open)
63134 + vma_m->vm_ops->open(vma_m);
63135 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
63136 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
63137 + vma_m->vm_mirror = vma;
63138 + vma->vm_mirror = vma_m;
63139 + return 0;
63140 +}
63141 +#endif
63142 +
63143 /*
63144 * Return true if the calling process may expand its vm space by the passed
63145 * number of pages
63146 @@ -2424,7 +2914,7 @@ int may_expand_vm(struct mm_struct *mm,
63147 unsigned long lim;
63148
63149 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
63150 -
63151 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
63152 if (cur + npages > lim)
63153 return 0;
63154 return 1;
63155 @@ -2495,6 +2985,22 @@ int install_special_mapping(struct mm_st
63156 vma->vm_start = addr;
63157 vma->vm_end = addr + len;
63158
63159 +#ifdef CONFIG_PAX_MPROTECT
63160 + if (mm->pax_flags & MF_PAX_MPROTECT) {
63161 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
63162 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
63163 + return -EPERM;
63164 + if (!(vm_flags & VM_EXEC))
63165 + vm_flags &= ~VM_MAYEXEC;
63166 +#else
63167 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
63168 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
63169 +#endif
63170 + else
63171 + vm_flags &= ~VM_MAYWRITE;
63172 + }
63173 +#endif
63174 +
63175 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
63176 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
63177
63178 diff -urNp linux-2.6.39.4/mm/mprotect.c linux-2.6.39.4/mm/mprotect.c
63179 --- linux-2.6.39.4/mm/mprotect.c 2011-05-19 00:06:34.000000000 -0400
63180 +++ linux-2.6.39.4/mm/mprotect.c 2011-08-05 19:44:37.000000000 -0400
63181 @@ -23,10 +23,16 @@
63182 #include <linux/mmu_notifier.h>
63183 #include <linux/migrate.h>
63184 #include <linux/perf_event.h>
63185 +
63186 +#ifdef CONFIG_PAX_MPROTECT
63187 +#include <linux/elf.h>
63188 +#endif
63189 +
63190 #include <asm/uaccess.h>
63191 #include <asm/pgtable.h>
63192 #include <asm/cacheflush.h>
63193 #include <asm/tlbflush.h>
63194 +#include <asm/mmu_context.h>
63195
63196 #ifndef pgprot_modify
63197 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
63198 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
63199 flush_tlb_range(vma, start, end);
63200 }
63201
63202 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63203 +/* called while holding the mmap semaphor for writing except stack expansion */
63204 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
63205 +{
63206 + unsigned long oldlimit, newlimit = 0UL;
63207 +
63208 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
63209 + return;
63210 +
63211 + spin_lock(&mm->page_table_lock);
63212 + oldlimit = mm->context.user_cs_limit;
63213 + if ((prot & VM_EXEC) && oldlimit < end)
63214 + /* USER_CS limit moved up */
63215 + newlimit = end;
63216 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
63217 + /* USER_CS limit moved down */
63218 + newlimit = start;
63219 +
63220 + if (newlimit) {
63221 + mm->context.user_cs_limit = newlimit;
63222 +
63223 +#ifdef CONFIG_SMP
63224 + wmb();
63225 + cpus_clear(mm->context.cpu_user_cs_mask);
63226 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
63227 +#endif
63228 +
63229 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
63230 + }
63231 + spin_unlock(&mm->page_table_lock);
63232 + if (newlimit == end) {
63233 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
63234 +
63235 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
63236 + if (is_vm_hugetlb_page(vma))
63237 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
63238 + else
63239 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
63240 + }
63241 +}
63242 +#endif
63243 +
63244 int
63245 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
63246 unsigned long start, unsigned long end, unsigned long newflags)
63247 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
63248 int error;
63249 int dirty_accountable = 0;
63250
63251 +#ifdef CONFIG_PAX_SEGMEXEC
63252 + struct vm_area_struct *vma_m = NULL;
63253 + unsigned long start_m, end_m;
63254 +
63255 + start_m = start + SEGMEXEC_TASK_SIZE;
63256 + end_m = end + SEGMEXEC_TASK_SIZE;
63257 +#endif
63258 +
63259 if (newflags == oldflags) {
63260 *pprev = vma;
63261 return 0;
63262 }
63263
63264 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63265 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63266 +
63267 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63268 + return -ENOMEM;
63269 +
63270 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63271 + return -ENOMEM;
63272 + }
63273 +
63274 /*
63275 * If we make a private mapping writable we increase our commit;
63276 * but (without finer accounting) cannot reduce our commit if we
63277 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63278 }
63279 }
63280
63281 +#ifdef CONFIG_PAX_SEGMEXEC
63282 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63283 + if (start != vma->vm_start) {
63284 + error = split_vma(mm, vma, start, 1);
63285 + if (error)
63286 + goto fail;
63287 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63288 + *pprev = (*pprev)->vm_next;
63289 + }
63290 +
63291 + if (end != vma->vm_end) {
63292 + error = split_vma(mm, vma, end, 0);
63293 + if (error)
63294 + goto fail;
63295 + }
63296 +
63297 + if (pax_find_mirror_vma(vma)) {
63298 + error = __do_munmap(mm, start_m, end_m - start_m);
63299 + if (error)
63300 + goto fail;
63301 + } else {
63302 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63303 + if (!vma_m) {
63304 + error = -ENOMEM;
63305 + goto fail;
63306 + }
63307 + vma->vm_flags = newflags;
63308 + error = pax_mirror_vma(vma_m, vma);
63309 + if (error) {
63310 + vma->vm_flags = oldflags;
63311 + goto fail;
63312 + }
63313 + }
63314 + }
63315 +#endif
63316 +
63317 /*
63318 * First try to merge with previous and/or next vma.
63319 */
63320 @@ -204,9 +306,21 @@ success:
63321 * vm_flags and vm_page_prot are protected by the mmap_sem
63322 * held in write mode.
63323 */
63324 +
63325 +#ifdef CONFIG_PAX_SEGMEXEC
63326 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63327 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63328 +#endif
63329 +
63330 vma->vm_flags = newflags;
63331 +
63332 +#ifdef CONFIG_PAX_MPROTECT
63333 + if (mm->binfmt && mm->binfmt->handle_mprotect)
63334 + mm->binfmt->handle_mprotect(vma, newflags);
63335 +#endif
63336 +
63337 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63338 - vm_get_page_prot(newflags));
63339 + vm_get_page_prot(vma->vm_flags));
63340
63341 if (vma_wants_writenotify(vma)) {
63342 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63343 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63344 end = start + len;
63345 if (end <= start)
63346 return -ENOMEM;
63347 +
63348 +#ifdef CONFIG_PAX_SEGMEXEC
63349 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63350 + if (end > SEGMEXEC_TASK_SIZE)
63351 + return -EINVAL;
63352 + } else
63353 +#endif
63354 +
63355 + if (end > TASK_SIZE)
63356 + return -EINVAL;
63357 +
63358 if (!arch_validate_prot(prot))
63359 return -EINVAL;
63360
63361 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63362 /*
63363 * Does the application expect PROT_READ to imply PROT_EXEC:
63364 */
63365 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63366 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63367 prot |= PROT_EXEC;
63368
63369 vm_flags = calc_vm_prot_bits(prot);
63370 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63371 if (start > vma->vm_start)
63372 prev = vma;
63373
63374 +#ifdef CONFIG_PAX_MPROTECT
63375 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63376 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
63377 +#endif
63378 +
63379 for (nstart = start ; ; ) {
63380 unsigned long newflags;
63381
63382 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63383
63384 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63385 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63386 + if (prot & (PROT_WRITE | PROT_EXEC))
63387 + gr_log_rwxmprotect(vma->vm_file);
63388 +
63389 + error = -EACCES;
63390 + goto out;
63391 + }
63392 +
63393 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63394 error = -EACCES;
63395 goto out;
63396 }
63397 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63398 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63399 if (error)
63400 goto out;
63401 +
63402 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
63403 +
63404 nstart = tmp;
63405
63406 if (nstart < prev->vm_end)
63407 diff -urNp linux-2.6.39.4/mm/mremap.c linux-2.6.39.4/mm/mremap.c
63408 --- linux-2.6.39.4/mm/mremap.c 2011-05-19 00:06:34.000000000 -0400
63409 +++ linux-2.6.39.4/mm/mremap.c 2011-08-05 19:44:37.000000000 -0400
63410 @@ -114,6 +114,12 @@ static void move_ptes(struct vm_area_str
63411 continue;
63412 pte = ptep_clear_flush(vma, old_addr, old_pte);
63413 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63414 +
63415 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63416 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63417 + pte = pte_exprotect(pte);
63418 +#endif
63419 +
63420 set_pte_at(mm, new_addr, new_pte, pte);
63421 }
63422
63423 @@ -273,6 +279,11 @@ static struct vm_area_struct *vma_to_res
63424 if (is_vm_hugetlb_page(vma))
63425 goto Einval;
63426
63427 +#ifdef CONFIG_PAX_SEGMEXEC
63428 + if (pax_find_mirror_vma(vma))
63429 + goto Einval;
63430 +#endif
63431 +
63432 /* We can't remap across vm area boundaries */
63433 if (old_len > vma->vm_end - addr)
63434 goto Efault;
63435 @@ -329,20 +340,25 @@ static unsigned long mremap_to(unsigned
63436 unsigned long ret = -EINVAL;
63437 unsigned long charged = 0;
63438 unsigned long map_flags;
63439 + unsigned long pax_task_size = TASK_SIZE;
63440
63441 if (new_addr & ~PAGE_MASK)
63442 goto out;
63443
63444 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63445 +#ifdef CONFIG_PAX_SEGMEXEC
63446 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63447 + pax_task_size = SEGMEXEC_TASK_SIZE;
63448 +#endif
63449 +
63450 + pax_task_size -= PAGE_SIZE;
63451 +
63452 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63453 goto out;
63454
63455 /* Check if the location we're moving into overlaps the
63456 * old location at all, and fail if it does.
63457 */
63458 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
63459 - goto out;
63460 -
63461 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
63462 + if (addr + old_len > new_addr && new_addr + new_len > addr)
63463 goto out;
63464
63465 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63466 @@ -414,6 +430,7 @@ unsigned long do_mremap(unsigned long ad
63467 struct vm_area_struct *vma;
63468 unsigned long ret = -EINVAL;
63469 unsigned long charged = 0;
63470 + unsigned long pax_task_size = TASK_SIZE;
63471
63472 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63473 goto out;
63474 @@ -432,6 +449,17 @@ unsigned long do_mremap(unsigned long ad
63475 if (!new_len)
63476 goto out;
63477
63478 +#ifdef CONFIG_PAX_SEGMEXEC
63479 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63480 + pax_task_size = SEGMEXEC_TASK_SIZE;
63481 +#endif
63482 +
63483 + pax_task_size -= PAGE_SIZE;
63484 +
63485 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63486 + old_len > pax_task_size || addr > pax_task_size-old_len)
63487 + goto out;
63488 +
63489 if (flags & MREMAP_FIXED) {
63490 if (flags & MREMAP_MAYMOVE)
63491 ret = mremap_to(addr, old_len, new_addr, new_len);
63492 @@ -481,6 +509,7 @@ unsigned long do_mremap(unsigned long ad
63493 addr + new_len);
63494 }
63495 ret = addr;
63496 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63497 goto out;
63498 }
63499 }
63500 @@ -507,7 +536,13 @@ unsigned long do_mremap(unsigned long ad
63501 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63502 if (ret)
63503 goto out;
63504 +
63505 + map_flags = vma->vm_flags;
63506 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63507 + if (!(ret & ~PAGE_MASK)) {
63508 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63509 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63510 + }
63511 }
63512 out:
63513 if (ret & ~PAGE_MASK)
63514 diff -urNp linux-2.6.39.4/mm/nobootmem.c linux-2.6.39.4/mm/nobootmem.c
63515 --- linux-2.6.39.4/mm/nobootmem.c 2011-05-19 00:06:34.000000000 -0400
63516 +++ linux-2.6.39.4/mm/nobootmem.c 2011-08-05 19:44:37.000000000 -0400
63517 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63518 unsigned long __init free_all_memory_core_early(int nodeid)
63519 {
63520 int i;
63521 - u64 start, end;
63522 + u64 start, end, startrange, endrange;
63523 unsigned long count = 0;
63524 - struct range *range = NULL;
63525 + struct range *range = NULL, rangerange = { 0, 0 };
63526 int nr_range;
63527
63528 nr_range = get_free_all_memory_range(&range, nodeid);
63529 + startrange = __pa(range) >> PAGE_SHIFT;
63530 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63531
63532 for (i = 0; i < nr_range; i++) {
63533 start = range[i].start;
63534 end = range[i].end;
63535 + if (start <= endrange && startrange < end) {
63536 + BUG_ON(rangerange.start | rangerange.end);
63537 + rangerange = range[i];
63538 + continue;
63539 + }
63540 count += end - start;
63541 __free_pages_memory(start, end);
63542 }
63543 + start = rangerange.start;
63544 + end = rangerange.end;
63545 + count += end - start;
63546 + __free_pages_memory(start, end);
63547
63548 return count;
63549 }
63550 diff -urNp linux-2.6.39.4/mm/nommu.c linux-2.6.39.4/mm/nommu.c
63551 --- linux-2.6.39.4/mm/nommu.c 2011-08-05 21:11:51.000000000 -0400
63552 +++ linux-2.6.39.4/mm/nommu.c 2011-08-05 21:12:20.000000000 -0400
63553 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63554 int sysctl_overcommit_ratio = 50; /* default is 50% */
63555 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63556 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63557 -int heap_stack_gap = 0;
63558
63559 atomic_long_t mmap_pages_allocated;
63560
63561 @@ -833,15 +832,6 @@ struct vm_area_struct *find_vma(struct m
63562 EXPORT_SYMBOL(find_vma);
63563
63564 /*
63565 - * find a VMA
63566 - * - we don't extend stack VMAs under NOMMU conditions
63567 - */
63568 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63569 -{
63570 - return find_vma(mm, addr);
63571 -}
63572 -
63573 -/*
63574 * expand a stack to a given address
63575 * - not supported under NOMMU conditions
63576 */
63577 @@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
63578
63579 /* most fields are the same, copy all, and then fixup */
63580 *new = *vma;
63581 + INIT_LIST_HEAD(&new->anon_vma_chain);
63582 *region = *vma->vm_region;
63583 new->vm_region = region;
63584
63585 diff -urNp linux-2.6.39.4/mm/page_alloc.c linux-2.6.39.4/mm/page_alloc.c
63586 --- linux-2.6.39.4/mm/page_alloc.c 2011-06-03 00:04:14.000000000 -0400
63587 +++ linux-2.6.39.4/mm/page_alloc.c 2011-08-05 19:44:37.000000000 -0400
63588 @@ -337,7 +337,7 @@ out:
63589 * This usage means that zero-order pages may not be compound.
63590 */
63591
63592 -static void free_compound_page(struct page *page)
63593 +void free_compound_page(struct page *page)
63594 {
63595 __free_pages_ok(page, compound_order(page));
63596 }
63597 @@ -650,6 +650,10 @@ static bool free_pages_prepare(struct pa
63598 int i;
63599 int bad = 0;
63600
63601 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63602 + unsigned long index = 1UL << order;
63603 +#endif
63604 +
63605 trace_mm_page_free_direct(page, order);
63606 kmemcheck_free_shadow(page, order);
63607
63608 @@ -665,6 +669,12 @@ static bool free_pages_prepare(struct pa
63609 debug_check_no_obj_freed(page_address(page),
63610 PAGE_SIZE << order);
63611 }
63612 +
63613 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63614 + for (; index; --index)
63615 + sanitize_highpage(page + index - 1);
63616 +#endif
63617 +
63618 arch_free_page(page, order);
63619 kernel_map_pages(page, 1 << order, 0);
63620
63621 @@ -780,8 +790,10 @@ static int prep_new_page(struct page *pa
63622 arch_alloc_page(page, order);
63623 kernel_map_pages(page, 1 << order, 1);
63624
63625 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63626 if (gfp_flags & __GFP_ZERO)
63627 prep_zero_page(page, order, gfp_flags);
63628 +#endif
63629
63630 if (order && (gfp_flags & __GFP_COMP))
63631 prep_compound_page(page, order);
63632 @@ -2504,6 +2516,8 @@ void __show_free_areas(unsigned int filt
63633 int cpu;
63634 struct zone *zone;
63635
63636 + pax_track_stack();
63637 +
63638 for_each_populated_zone(zone) {
63639 if (skip_free_areas_zone(filter, zone))
63640 continue;
63641 diff -urNp linux-2.6.39.4/mm/percpu.c linux-2.6.39.4/mm/percpu.c
63642 --- linux-2.6.39.4/mm/percpu.c 2011-05-19 00:06:34.000000000 -0400
63643 +++ linux-2.6.39.4/mm/percpu.c 2011-08-05 19:44:37.000000000 -0400
63644 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63645 static unsigned int pcpu_last_unit_cpu __read_mostly;
63646
63647 /* the address of the first chunk which starts with the kernel static area */
63648 -void *pcpu_base_addr __read_mostly;
63649 +void *pcpu_base_addr __read_only;
63650 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63651
63652 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63653 diff -urNp linux-2.6.39.4/mm/rmap.c linux-2.6.39.4/mm/rmap.c
63654 --- linux-2.6.39.4/mm/rmap.c 2011-05-19 00:06:34.000000000 -0400
63655 +++ linux-2.6.39.4/mm/rmap.c 2011-08-05 19:44:37.000000000 -0400
63656 @@ -131,6 +131,10 @@ int anon_vma_prepare(struct vm_area_stru
63657 struct anon_vma *anon_vma = vma->anon_vma;
63658 struct anon_vma_chain *avc;
63659
63660 +#ifdef CONFIG_PAX_SEGMEXEC
63661 + struct anon_vma_chain *avc_m = NULL;
63662 +#endif
63663 +
63664 might_sleep();
63665 if (unlikely(!anon_vma)) {
63666 struct mm_struct *mm = vma->vm_mm;
63667 @@ -140,6 +144,12 @@ int anon_vma_prepare(struct vm_area_stru
63668 if (!avc)
63669 goto out_enomem;
63670
63671 +#ifdef CONFIG_PAX_SEGMEXEC
63672 + avc_m = anon_vma_chain_alloc();
63673 + if (!avc_m)
63674 + goto out_enomem_free_avc;
63675 +#endif
63676 +
63677 anon_vma = find_mergeable_anon_vma(vma);
63678 allocated = NULL;
63679 if (!anon_vma) {
63680 @@ -153,6 +163,21 @@ int anon_vma_prepare(struct vm_area_stru
63681 /* page_table_lock to protect against threads */
63682 spin_lock(&mm->page_table_lock);
63683 if (likely(!vma->anon_vma)) {
63684 +
63685 +#ifdef CONFIG_PAX_SEGMEXEC
63686 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63687 +
63688 + if (vma_m) {
63689 + BUG_ON(vma_m->anon_vma);
63690 + vma_m->anon_vma = anon_vma;
63691 + avc_m->anon_vma = anon_vma;
63692 + avc_m->vma = vma;
63693 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63694 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63695 + avc_m = NULL;
63696 + }
63697 +#endif
63698 +
63699 vma->anon_vma = anon_vma;
63700 avc->anon_vma = anon_vma;
63701 avc->vma = vma;
63702 @@ -166,12 +191,24 @@ int anon_vma_prepare(struct vm_area_stru
63703
63704 if (unlikely(allocated))
63705 put_anon_vma(allocated);
63706 +
63707 +#ifdef CONFIG_PAX_SEGMEXEC
63708 + if (unlikely(avc_m))
63709 + anon_vma_chain_free(avc_m);
63710 +#endif
63711 +
63712 if (unlikely(avc))
63713 anon_vma_chain_free(avc);
63714 }
63715 return 0;
63716
63717 out_enomem_free_avc:
63718 +
63719 +#ifdef CONFIG_PAX_SEGMEXEC
63720 + if (avc_m)
63721 + anon_vma_chain_free(avc_m);
63722 +#endif
63723 +
63724 anon_vma_chain_free(avc);
63725 out_enomem:
63726 return -ENOMEM;
63727 @@ -198,7 +235,7 @@ static void anon_vma_chain_link(struct v
63728 * Attach the anon_vmas from src to dst.
63729 * Returns 0 on success, -ENOMEM on failure.
63730 */
63731 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63732 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63733 {
63734 struct anon_vma_chain *avc, *pavc;
63735
63736 @@ -220,7 +257,7 @@ int anon_vma_clone(struct vm_area_struct
63737 * the corresponding VMA in the parent process is attached to.
63738 * Returns 0 on success, non-zero on failure.
63739 */
63740 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63741 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63742 {
63743 struct anon_vma_chain *avc;
63744 struct anon_vma *anon_vma;
63745 diff -urNp linux-2.6.39.4/mm/shmem.c linux-2.6.39.4/mm/shmem.c
63746 --- linux-2.6.39.4/mm/shmem.c 2011-06-03 00:04:14.000000000 -0400
63747 +++ linux-2.6.39.4/mm/shmem.c 2011-08-05 19:44:37.000000000 -0400
63748 @@ -31,7 +31,7 @@
63749 #include <linux/percpu_counter.h>
63750 #include <linux/swap.h>
63751
63752 -static struct vfsmount *shm_mnt;
63753 +struct vfsmount *shm_mnt;
63754
63755 #ifdef CONFIG_SHMEM
63756 /*
63757 @@ -1087,6 +1087,8 @@ static int shmem_writepage(struct page *
63758 goto unlock;
63759 }
63760 entry = shmem_swp_entry(info, index, NULL);
63761 + if (!entry)
63762 + goto unlock;
63763 if (entry->val) {
63764 /*
63765 * The more uptodate page coming down from a stacked
63766 @@ -1158,6 +1160,8 @@ static struct page *shmem_swapin(swp_ent
63767 struct vm_area_struct pvma;
63768 struct page *page;
63769
63770 + pax_track_stack();
63771 +
63772 spol = mpol_cond_copy(&mpol,
63773 mpol_shared_policy_lookup(&info->policy, idx));
63774
63775 @@ -2014,7 +2018,7 @@ static int shmem_symlink(struct inode *d
63776
63777 info = SHMEM_I(inode);
63778 inode->i_size = len-1;
63779 - if (len <= (char *)inode - (char *)info) {
63780 + if (len <= (char *)inode - (char *)info && len <= 64) {
63781 /* do it inline */
63782 memcpy(info, symname, len);
63783 inode->i_op = &shmem_symlink_inline_operations;
63784 @@ -2362,8 +2366,7 @@ int shmem_fill_super(struct super_block
63785 int err = -ENOMEM;
63786
63787 /* Round up to L1_CACHE_BYTES to resist false sharing */
63788 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63789 - L1_CACHE_BYTES), GFP_KERNEL);
63790 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63791 if (!sbinfo)
63792 return -ENOMEM;
63793
63794 diff -urNp linux-2.6.39.4/mm/slab.c linux-2.6.39.4/mm/slab.c
63795 --- linux-2.6.39.4/mm/slab.c 2011-05-19 00:06:34.000000000 -0400
63796 +++ linux-2.6.39.4/mm/slab.c 2011-08-05 19:44:37.000000000 -0400
63797 @@ -150,7 +150,7 @@
63798
63799 /* Legal flag mask for kmem_cache_create(). */
63800 #if DEBUG
63801 -# define CREATE_MASK (SLAB_RED_ZONE | \
63802 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63803 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63804 SLAB_CACHE_DMA | \
63805 SLAB_STORE_USER | \
63806 @@ -158,7 +158,7 @@
63807 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63808 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63809 #else
63810 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63811 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63812 SLAB_CACHE_DMA | \
63813 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63814 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63815 @@ -287,7 +287,7 @@ struct kmem_list3 {
63816 * Need this for bootstrapping a per node allocator.
63817 */
63818 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63819 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63820 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63821 #define CACHE_CACHE 0
63822 #define SIZE_AC MAX_NUMNODES
63823 #define SIZE_L3 (2 * MAX_NUMNODES)
63824 @@ -388,10 +388,10 @@ static void kmem_list3_init(struct kmem_
63825 if ((x)->max_freeable < i) \
63826 (x)->max_freeable = i; \
63827 } while (0)
63828 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63829 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63830 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63831 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63832 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63833 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63834 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63835 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63836 #else
63837 #define STATS_INC_ACTIVE(x) do { } while (0)
63838 #define STATS_DEC_ACTIVE(x) do { } while (0)
63839 @@ -537,7 +537,7 @@ static inline void *index_to_obj(struct
63840 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63841 */
63842 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63843 - const struct slab *slab, void *obj)
63844 + const struct slab *slab, const void *obj)
63845 {
63846 u32 offset = (obj - slab->s_mem);
63847 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63848 @@ -563,7 +563,7 @@ struct cache_names {
63849 static struct cache_names __initdata cache_names[] = {
63850 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63851 #include <linux/kmalloc_sizes.h>
63852 - {NULL,}
63853 + {NULL}
63854 #undef CACHE
63855 };
63856
63857 @@ -1529,7 +1529,7 @@ void __init kmem_cache_init(void)
63858 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63859 sizes[INDEX_AC].cs_size,
63860 ARCH_KMALLOC_MINALIGN,
63861 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63862 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63863 NULL);
63864
63865 if (INDEX_AC != INDEX_L3) {
63866 @@ -1537,7 +1537,7 @@ void __init kmem_cache_init(void)
63867 kmem_cache_create(names[INDEX_L3].name,
63868 sizes[INDEX_L3].cs_size,
63869 ARCH_KMALLOC_MINALIGN,
63870 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63871 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63872 NULL);
63873 }
63874
63875 @@ -1555,7 +1555,7 @@ void __init kmem_cache_init(void)
63876 sizes->cs_cachep = kmem_cache_create(names->name,
63877 sizes->cs_size,
63878 ARCH_KMALLOC_MINALIGN,
63879 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63880 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63881 NULL);
63882 }
63883 #ifdef CONFIG_ZONE_DMA
63884 @@ -4270,10 +4270,10 @@ static int s_show(struct seq_file *m, vo
63885 }
63886 /* cpu stats */
63887 {
63888 - unsigned long allochit = atomic_read(&cachep->allochit);
63889 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63890 - unsigned long freehit = atomic_read(&cachep->freehit);
63891 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63892 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63893 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63894 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63895 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63896
63897 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63898 allochit, allocmiss, freehit, freemiss);
63899 @@ -4530,15 +4530,66 @@ static const struct file_operations proc
63900
63901 static int __init slab_proc_init(void)
63902 {
63903 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63904 + mode_t gr_mode = S_IRUGO;
63905 +
63906 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63907 + gr_mode = S_IRUSR;
63908 +#endif
63909 +
63910 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
63911 #ifdef CONFIG_DEBUG_SLAB_LEAK
63912 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
63913 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
63914 #endif
63915 return 0;
63916 }
63917 module_init(slab_proc_init);
63918 #endif
63919
63920 +void check_object_size(const void *ptr, unsigned long n, bool to)
63921 +{
63922 +
63923 +#ifdef CONFIG_PAX_USERCOPY
63924 + struct page *page;
63925 + struct kmem_cache *cachep = NULL;
63926 + struct slab *slabp;
63927 + unsigned int objnr;
63928 + unsigned long offset;
63929 +
63930 + if (!n)
63931 + return;
63932 +
63933 + if (ZERO_OR_NULL_PTR(ptr))
63934 + goto report;
63935 +
63936 + if (!virt_addr_valid(ptr))
63937 + return;
63938 +
63939 + page = virt_to_head_page(ptr);
63940 +
63941 + if (!PageSlab(page)) {
63942 + if (object_is_on_stack(ptr, n) == -1)
63943 + goto report;
63944 + return;
63945 + }
63946 +
63947 + cachep = page_get_cache(page);
63948 + if (!(cachep->flags & SLAB_USERCOPY))
63949 + goto report;
63950 +
63951 + slabp = page_get_slab(page);
63952 + objnr = obj_to_index(cachep, slabp, ptr);
63953 + BUG_ON(objnr >= cachep->num);
63954 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
63955 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
63956 + return;
63957 +
63958 +report:
63959 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
63960 +#endif
63961 +
63962 +}
63963 +EXPORT_SYMBOL(check_object_size);
63964 +
63965 /**
63966 * ksize - get the actual amount of memory allocated for a given object
63967 * @objp: Pointer to the object
63968 diff -urNp linux-2.6.39.4/mm/slob.c linux-2.6.39.4/mm/slob.c
63969 --- linux-2.6.39.4/mm/slob.c 2011-05-19 00:06:34.000000000 -0400
63970 +++ linux-2.6.39.4/mm/slob.c 2011-08-05 19:44:37.000000000 -0400
63971 @@ -29,7 +29,7 @@
63972 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
63973 * alloc_pages() directly, allocating compound pages so the page order
63974 * does not have to be separately tracked, and also stores the exact
63975 - * allocation size in page->private so that it can be used to accurately
63976 + * allocation size in slob_page->size so that it can be used to accurately
63977 * provide ksize(). These objects are detected in kfree() because slob_page()
63978 * is false for them.
63979 *
63980 @@ -58,6 +58,7 @@
63981 */
63982
63983 #include <linux/kernel.h>
63984 +#include <linux/sched.h>
63985 #include <linux/slab.h>
63986 #include <linux/mm.h>
63987 #include <linux/swap.h> /* struct reclaim_state */
63988 @@ -102,7 +103,8 @@ struct slob_page {
63989 unsigned long flags; /* mandatory */
63990 atomic_t _count; /* mandatory */
63991 slobidx_t units; /* free units left in page */
63992 - unsigned long pad[2];
63993 + unsigned long pad[1];
63994 + unsigned long size; /* size when >=PAGE_SIZE */
63995 slob_t *free; /* first free slob_t in page */
63996 struct list_head list; /* linked list of free pages */
63997 };
63998 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
63999 */
64000 static inline int is_slob_page(struct slob_page *sp)
64001 {
64002 - return PageSlab((struct page *)sp);
64003 + return PageSlab((struct page *)sp) && !sp->size;
64004 }
64005
64006 static inline void set_slob_page(struct slob_page *sp)
64007 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
64008
64009 static inline struct slob_page *slob_page(const void *addr)
64010 {
64011 - return (struct slob_page *)virt_to_page(addr);
64012 + return (struct slob_page *)virt_to_head_page(addr);
64013 }
64014
64015 /*
64016 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
64017 /*
64018 * Return the size of a slob block.
64019 */
64020 -static slobidx_t slob_units(slob_t *s)
64021 +static slobidx_t slob_units(const slob_t *s)
64022 {
64023 if (s->units > 0)
64024 return s->units;
64025 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
64026 /*
64027 * Return the next free slob block pointer after this one.
64028 */
64029 -static slob_t *slob_next(slob_t *s)
64030 +static slob_t *slob_next(const slob_t *s)
64031 {
64032 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
64033 slobidx_t next;
64034 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
64035 /*
64036 * Returns true if s is the last free block in its page.
64037 */
64038 -static int slob_last(slob_t *s)
64039 +static int slob_last(const slob_t *s)
64040 {
64041 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
64042 }
64043 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
64044 if (!page)
64045 return NULL;
64046
64047 + set_slob_page(page);
64048 return page_address(page);
64049 }
64050
64051 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
64052 if (!b)
64053 return NULL;
64054 sp = slob_page(b);
64055 - set_slob_page(sp);
64056
64057 spin_lock_irqsave(&slob_lock, flags);
64058 sp->units = SLOB_UNITS(PAGE_SIZE);
64059 sp->free = b;
64060 + sp->size = 0;
64061 INIT_LIST_HEAD(&sp->list);
64062 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
64063 set_slob_page_free(sp, slob_list);
64064 @@ -476,10 +479,9 @@ out:
64065 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
64066 */
64067
64068 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64069 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
64070 {
64071 - unsigned int *m;
64072 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64073 + slob_t *m;
64074 void *ret;
64075
64076 lockdep_trace_alloc(gfp);
64077 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
64078
64079 if (!m)
64080 return NULL;
64081 - *m = size;
64082 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
64083 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
64084 + m[0].units = size;
64085 + m[1].units = align;
64086 ret = (void *)m + align;
64087
64088 trace_kmalloc_node(_RET_IP_, ret,
64089 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
64090 gfp |= __GFP_COMP;
64091 ret = slob_new_pages(gfp, order, node);
64092 if (ret) {
64093 - struct page *page;
64094 - page = virt_to_page(ret);
64095 - page->private = size;
64096 + struct slob_page *sp;
64097 + sp = slob_page(ret);
64098 + sp->size = size;
64099 }
64100
64101 trace_kmalloc_node(_RET_IP_, ret,
64102 size, PAGE_SIZE << order, gfp, node);
64103 }
64104
64105 - kmemleak_alloc(ret, size, 1, gfp);
64106 + return ret;
64107 +}
64108 +
64109 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64110 +{
64111 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64112 + void *ret = __kmalloc_node_align(size, gfp, node, align);
64113 +
64114 + if (!ZERO_OR_NULL_PTR(ret))
64115 + kmemleak_alloc(ret, size, 1, gfp);
64116 return ret;
64117 }
64118 EXPORT_SYMBOL(__kmalloc_node);
64119 @@ -531,13 +545,88 @@ void kfree(const void *block)
64120 sp = slob_page(block);
64121 if (is_slob_page(sp)) {
64122 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64123 - unsigned int *m = (unsigned int *)(block - align);
64124 - slob_free(m, *m + align);
64125 - } else
64126 + slob_t *m = (slob_t *)(block - align);
64127 + slob_free(m, m[0].units + align);
64128 + } else {
64129 + clear_slob_page(sp);
64130 + free_slob_page(sp);
64131 + sp->size = 0;
64132 put_page(&sp->page);
64133 + }
64134 }
64135 EXPORT_SYMBOL(kfree);
64136
64137 +void check_object_size(const void *ptr, unsigned long n, bool to)
64138 +{
64139 +
64140 +#ifdef CONFIG_PAX_USERCOPY
64141 + struct slob_page *sp;
64142 + const slob_t *free;
64143 + const void *base;
64144 + unsigned long flags;
64145 +
64146 + if (!n)
64147 + return;
64148 +
64149 + if (ZERO_OR_NULL_PTR(ptr))
64150 + goto report;
64151 +
64152 + if (!virt_addr_valid(ptr))
64153 + return;
64154 +
64155 + sp = slob_page(ptr);
64156 + if (!PageSlab((struct page*)sp)) {
64157 + if (object_is_on_stack(ptr, n) == -1)
64158 + goto report;
64159 + return;
64160 + }
64161 +
64162 + if (sp->size) {
64163 + base = page_address(&sp->page);
64164 + if (base <= ptr && n <= sp->size - (ptr - base))
64165 + return;
64166 + goto report;
64167 + }
64168 +
64169 + /* some tricky double walking to find the chunk */
64170 + spin_lock_irqsave(&slob_lock, flags);
64171 + base = (void *)((unsigned long)ptr & PAGE_MASK);
64172 + free = sp->free;
64173 +
64174 + while (!slob_last(free) && (void *)free <= ptr) {
64175 + base = free + slob_units(free);
64176 + free = slob_next(free);
64177 + }
64178 +
64179 + while (base < (void *)free) {
64180 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
64181 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
64182 + int offset;
64183 +
64184 + if (ptr < base + align)
64185 + break;
64186 +
64187 + offset = ptr - base - align;
64188 + if (offset >= m) {
64189 + base += size;
64190 + continue;
64191 + }
64192 +
64193 + if (n > m - offset)
64194 + break;
64195 +
64196 + spin_unlock_irqrestore(&slob_lock, flags);
64197 + return;
64198 + }
64199 +
64200 + spin_unlock_irqrestore(&slob_lock, flags);
64201 +report:
64202 + pax_report_usercopy(ptr, n, to, NULL);
64203 +#endif
64204 +
64205 +}
64206 +EXPORT_SYMBOL(check_object_size);
64207 +
64208 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
64209 size_t ksize(const void *block)
64210 {
64211 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
64212 sp = slob_page(block);
64213 if (is_slob_page(sp)) {
64214 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64215 - unsigned int *m = (unsigned int *)(block - align);
64216 - return SLOB_UNITS(*m) * SLOB_UNIT;
64217 + slob_t *m = (slob_t *)(block - align);
64218 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
64219 } else
64220 - return sp->page.private;
64221 + return sp->size;
64222 }
64223 EXPORT_SYMBOL(ksize);
64224
64225 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
64226 {
64227 struct kmem_cache *c;
64228
64229 +#ifdef CONFIG_PAX_USERCOPY
64230 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
64231 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
64232 +#else
64233 c = slob_alloc(sizeof(struct kmem_cache),
64234 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
64235 +#endif
64236
64237 if (c) {
64238 c->name = name;
64239 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
64240 {
64241 void *b;
64242
64243 +#ifdef CONFIG_PAX_USERCOPY
64244 + b = __kmalloc_node_align(c->size, flags, node, c->align);
64245 +#else
64246 if (c->size < PAGE_SIZE) {
64247 b = slob_alloc(c->size, flags, c->align, node);
64248 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64249 SLOB_UNITS(c->size) * SLOB_UNIT,
64250 flags, node);
64251 } else {
64252 + struct slob_page *sp;
64253 +
64254 b = slob_new_pages(flags, get_order(c->size), node);
64255 + sp = slob_page(b);
64256 + sp->size = c->size;
64257 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64258 PAGE_SIZE << get_order(c->size),
64259 flags, node);
64260 }
64261 +#endif
64262
64263 if (c->ctor)
64264 c->ctor(b);
64265 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64266
64267 static void __kmem_cache_free(void *b, int size)
64268 {
64269 - if (size < PAGE_SIZE)
64270 + struct slob_page *sp = slob_page(b);
64271 +
64272 + if (is_slob_page(sp))
64273 slob_free(b, size);
64274 - else
64275 + else {
64276 + clear_slob_page(sp);
64277 + free_slob_page(sp);
64278 + sp->size = 0;
64279 slob_free_pages(b, get_order(size));
64280 + }
64281 }
64282
64283 static void kmem_rcu_free(struct rcu_head *head)
64284 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64285
64286 void kmem_cache_free(struct kmem_cache *c, void *b)
64287 {
64288 + int size = c->size;
64289 +
64290 +#ifdef CONFIG_PAX_USERCOPY
64291 + if (size + c->align < PAGE_SIZE) {
64292 + size += c->align;
64293 + b -= c->align;
64294 + }
64295 +#endif
64296 +
64297 kmemleak_free_recursive(b, c->flags);
64298 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64299 struct slob_rcu *slob_rcu;
64300 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64301 - slob_rcu->size = c->size;
64302 + slob_rcu = b + (size - sizeof(struct slob_rcu));
64303 + slob_rcu->size = size;
64304 call_rcu(&slob_rcu->head, kmem_rcu_free);
64305 } else {
64306 - __kmem_cache_free(b, c->size);
64307 + __kmem_cache_free(b, size);
64308 }
64309
64310 +#ifdef CONFIG_PAX_USERCOPY
64311 + trace_kfree(_RET_IP_, b);
64312 +#else
64313 trace_kmem_cache_free(_RET_IP_, b);
64314 +#endif
64315 +
64316 }
64317 EXPORT_SYMBOL(kmem_cache_free);
64318
64319 diff -urNp linux-2.6.39.4/mm/slub.c linux-2.6.39.4/mm/slub.c
64320 --- linux-2.6.39.4/mm/slub.c 2011-06-03 00:04:14.000000000 -0400
64321 +++ linux-2.6.39.4/mm/slub.c 2011-08-05 19:44:37.000000000 -0400
64322 @@ -431,7 +431,7 @@ static void print_track(const char *s, s
64323 if (!t->addr)
64324 return;
64325
64326 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64327 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64328 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64329 }
64330
64331 @@ -2183,6 +2183,8 @@ void kmem_cache_free(struct kmem_cache *
64332
64333 page = virt_to_head_page(x);
64334
64335 + BUG_ON(!PageSlab(page));
64336 +
64337 slab_free(s, page, x, _RET_IP_);
64338
64339 trace_kmem_cache_free(_RET_IP_, x);
64340 @@ -2216,7 +2218,7 @@ static int slub_min_objects;
64341 * Merge control. If this is set then no merging of slab caches will occur.
64342 * (Could be removed. This was introduced to pacify the merge skeptics.)
64343 */
64344 -static int slub_nomerge;
64345 +static int slub_nomerge = 1;
64346
64347 /*
64348 * Calculate the order of allocation given an slab object size.
64349 @@ -2644,7 +2646,7 @@ static int kmem_cache_open(struct kmem_c
64350 * list to avoid pounding the page allocator excessively.
64351 */
64352 set_min_partial(s, ilog2(s->size));
64353 - s->refcount = 1;
64354 + atomic_set(&s->refcount, 1);
64355 #ifdef CONFIG_NUMA
64356 s->remote_node_defrag_ratio = 1000;
64357 #endif
64358 @@ -2750,8 +2752,7 @@ static inline int kmem_cache_close(struc
64359 void kmem_cache_destroy(struct kmem_cache *s)
64360 {
64361 down_write(&slub_lock);
64362 - s->refcount--;
64363 - if (!s->refcount) {
64364 + if (atomic_dec_and_test(&s->refcount)) {
64365 list_del(&s->list);
64366 if (kmem_cache_close(s)) {
64367 printk(KERN_ERR "SLUB %s: %s called for cache that "
64368 @@ -2961,6 +2962,46 @@ void *__kmalloc_node(size_t size, gfp_t
64369 EXPORT_SYMBOL(__kmalloc_node);
64370 #endif
64371
64372 +void check_object_size(const void *ptr, unsigned long n, bool to)
64373 +{
64374 +
64375 +#ifdef CONFIG_PAX_USERCOPY
64376 + struct page *page;
64377 + struct kmem_cache *s = NULL;
64378 + unsigned long offset;
64379 +
64380 + if (!n)
64381 + return;
64382 +
64383 + if (ZERO_OR_NULL_PTR(ptr))
64384 + goto report;
64385 +
64386 + if (!virt_addr_valid(ptr))
64387 + return;
64388 +
64389 + page = virt_to_head_page(ptr);
64390 +
64391 + if (!PageSlab(page)) {
64392 + if (object_is_on_stack(ptr, n) == -1)
64393 + goto report;
64394 + return;
64395 + }
64396 +
64397 + s = page->slab;
64398 + if (!(s->flags & SLAB_USERCOPY))
64399 + goto report;
64400 +
64401 + offset = (ptr - page_address(page)) % s->size;
64402 + if (offset <= s->objsize && n <= s->objsize - offset)
64403 + return;
64404 +
64405 +report:
64406 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64407 +#endif
64408 +
64409 +}
64410 +EXPORT_SYMBOL(check_object_size);
64411 +
64412 size_t ksize(const void *object)
64413 {
64414 struct page *page;
64415 @@ -3205,7 +3246,7 @@ static void __init kmem_cache_bootstrap_
64416 int node;
64417
64418 list_add(&s->list, &slab_caches);
64419 - s->refcount = -1;
64420 + atomic_set(&s->refcount, -1);
64421
64422 for_each_node_state(node, N_NORMAL_MEMORY) {
64423 struct kmem_cache_node *n = get_node(s, node);
64424 @@ -3322,17 +3363,17 @@ void __init kmem_cache_init(void)
64425
64426 /* Caches that are not of the two-to-the-power-of size */
64427 if (KMALLOC_MIN_SIZE <= 32) {
64428 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64429 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64430 caches++;
64431 }
64432
64433 if (KMALLOC_MIN_SIZE <= 64) {
64434 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64435 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64436 caches++;
64437 }
64438
64439 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64440 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64441 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64442 caches++;
64443 }
64444
64445 @@ -3400,7 +3441,7 @@ static int slab_unmergeable(struct kmem_
64446 /*
64447 * We may have set a slab to be unmergeable during bootstrap.
64448 */
64449 - if (s->refcount < 0)
64450 + if (atomic_read(&s->refcount) < 0)
64451 return 1;
64452
64453 return 0;
64454 @@ -3459,7 +3500,7 @@ struct kmem_cache *kmem_cache_create(con
64455 down_write(&slub_lock);
64456 s = find_mergeable(size, align, flags, name, ctor);
64457 if (s) {
64458 - s->refcount++;
64459 + atomic_inc(&s->refcount);
64460 /*
64461 * Adjust the object sizes so that we clear
64462 * the complete object on kzalloc.
64463 @@ -3468,7 +3509,7 @@ struct kmem_cache *kmem_cache_create(con
64464 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64465
64466 if (sysfs_slab_alias(s, name)) {
64467 - s->refcount--;
64468 + atomic_dec(&s->refcount);
64469 goto err;
64470 }
64471 up_write(&slub_lock);
64472 @@ -4201,7 +4242,7 @@ SLAB_ATTR_RO(ctor);
64473
64474 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64475 {
64476 - return sprintf(buf, "%d\n", s->refcount - 1);
64477 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64478 }
64479 SLAB_ATTR_RO(aliases);
64480
64481 @@ -4945,7 +4986,13 @@ static const struct file_operations proc
64482
64483 static int __init slab_proc_init(void)
64484 {
64485 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64486 + mode_t gr_mode = S_IRUGO;
64487 +
64488 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64489 + gr_mode = S_IRUSR;
64490 +#endif
64491 +
64492 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64493 return 0;
64494 }
64495 module_init(slab_proc_init);
64496 diff -urNp linux-2.6.39.4/mm/swap.c linux-2.6.39.4/mm/swap.c
64497 --- linux-2.6.39.4/mm/swap.c 2011-05-19 00:06:34.000000000 -0400
64498 +++ linux-2.6.39.4/mm/swap.c 2011-08-05 19:44:37.000000000 -0400
64499 @@ -31,6 +31,7 @@
64500 #include <linux/backing-dev.h>
64501 #include <linux/memcontrol.h>
64502 #include <linux/gfp.h>
64503 +#include <linux/hugetlb.h>
64504
64505 #include "internal.h"
64506
64507 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64508
64509 __page_cache_release(page);
64510 dtor = get_compound_page_dtor(page);
64511 + if (!PageHuge(page))
64512 + BUG_ON(dtor != free_compound_page);
64513 (*dtor)(page);
64514 }
64515
64516 diff -urNp linux-2.6.39.4/mm/swapfile.c linux-2.6.39.4/mm/swapfile.c
64517 --- linux-2.6.39.4/mm/swapfile.c 2011-05-19 00:06:34.000000000 -0400
64518 +++ linux-2.6.39.4/mm/swapfile.c 2011-08-05 19:44:37.000000000 -0400
64519 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
64520
64521 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64522 /* Activity counter to indicate that a swapon or swapoff has occurred */
64523 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
64524 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64525
64526 static inline unsigned char swap_count(unsigned char ent)
64527 {
64528 @@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64529 }
64530 filp_close(swap_file, NULL);
64531 err = 0;
64532 - atomic_inc(&proc_poll_event);
64533 + atomic_inc_unchecked(&proc_poll_event);
64534 wake_up_interruptible(&proc_poll_wait);
64535
64536 out_dput:
64537 @@ -1690,8 +1690,8 @@ static unsigned swaps_poll(struct file *
64538
64539 poll_wait(file, &proc_poll_wait, wait);
64540
64541 - if (s->event != atomic_read(&proc_poll_event)) {
64542 - s->event = atomic_read(&proc_poll_event);
64543 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64544 + s->event = atomic_read_unchecked(&proc_poll_event);
64545 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64546 }
64547
64548 @@ -1797,7 +1797,7 @@ static int swaps_open(struct inode *inod
64549 }
64550
64551 s->seq.private = s;
64552 - s->event = atomic_read(&proc_poll_event);
64553 + s->event = atomic_read_unchecked(&proc_poll_event);
64554 return ret;
64555 }
64556
64557 @@ -2131,7 +2131,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64558 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64559
64560 mutex_unlock(&swapon_mutex);
64561 - atomic_inc(&proc_poll_event);
64562 + atomic_inc_unchecked(&proc_poll_event);
64563 wake_up_interruptible(&proc_poll_wait);
64564
64565 if (S_ISREG(inode->i_mode))
64566 diff -urNp linux-2.6.39.4/mm/util.c linux-2.6.39.4/mm/util.c
64567 --- linux-2.6.39.4/mm/util.c 2011-05-19 00:06:34.000000000 -0400
64568 +++ linux-2.6.39.4/mm/util.c 2011-08-05 19:44:37.000000000 -0400
64569 @@ -112,6 +112,7 @@ EXPORT_SYMBOL(memdup_user);
64570 * allocated buffer. Use this if you don't want to free the buffer immediately
64571 * like, for example, with RCU.
64572 */
64573 +#undef __krealloc
64574 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64575 {
64576 void *ret;
64577 @@ -145,6 +146,7 @@ EXPORT_SYMBOL(__krealloc);
64578 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64579 * %NULL pointer, the object pointed to is freed.
64580 */
64581 +#undef krealloc
64582 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64583 {
64584 void *ret;
64585 @@ -219,6 +221,12 @@ EXPORT_SYMBOL(strndup_user);
64586 void arch_pick_mmap_layout(struct mm_struct *mm)
64587 {
64588 mm->mmap_base = TASK_UNMAPPED_BASE;
64589 +
64590 +#ifdef CONFIG_PAX_RANDMMAP
64591 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64592 + mm->mmap_base += mm->delta_mmap;
64593 +#endif
64594 +
64595 mm->get_unmapped_area = arch_get_unmapped_area;
64596 mm->unmap_area = arch_unmap_area;
64597 }
64598 diff -urNp linux-2.6.39.4/mm/vmalloc.c linux-2.6.39.4/mm/vmalloc.c
64599 --- linux-2.6.39.4/mm/vmalloc.c 2011-05-19 00:06:34.000000000 -0400
64600 +++ linux-2.6.39.4/mm/vmalloc.c 2011-08-05 19:44:37.000000000 -0400
64601 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64602
64603 pte = pte_offset_kernel(pmd, addr);
64604 do {
64605 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64606 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64607 +
64608 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64609 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64610 + BUG_ON(!pte_exec(*pte));
64611 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64612 + continue;
64613 + }
64614 +#endif
64615 +
64616 + {
64617 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64618 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64619 + }
64620 } while (pte++, addr += PAGE_SIZE, addr != end);
64621 }
64622
64623 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64624 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64625 {
64626 pte_t *pte;
64627 + int ret = -ENOMEM;
64628
64629 /*
64630 * nr is a running index into the array which helps higher level
64631 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64632 pte = pte_alloc_kernel(pmd, addr);
64633 if (!pte)
64634 return -ENOMEM;
64635 +
64636 + pax_open_kernel();
64637 do {
64638 struct page *page = pages[*nr];
64639
64640 - if (WARN_ON(!pte_none(*pte)))
64641 - return -EBUSY;
64642 - if (WARN_ON(!page))
64643 - return -ENOMEM;
64644 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64645 + if (pgprot_val(prot) & _PAGE_NX)
64646 +#endif
64647 +
64648 + if (WARN_ON(!pte_none(*pte))) {
64649 + ret = -EBUSY;
64650 + goto out;
64651 + }
64652 + if (WARN_ON(!page)) {
64653 + ret = -ENOMEM;
64654 + goto out;
64655 + }
64656 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64657 (*nr)++;
64658 } while (pte++, addr += PAGE_SIZE, addr != end);
64659 - return 0;
64660 + ret = 0;
64661 +out:
64662 + pax_close_kernel();
64663 + return ret;
64664 }
64665
64666 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64667 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64668 * and fall back on vmalloc() if that fails. Others
64669 * just put it in the vmalloc space.
64670 */
64671 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64672 +#ifdef CONFIG_MODULES
64673 +#ifdef MODULES_VADDR
64674 unsigned long addr = (unsigned long)x;
64675 if (addr >= MODULES_VADDR && addr < MODULES_END)
64676 return 1;
64677 #endif
64678 +
64679 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64680 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64681 + return 1;
64682 +#endif
64683 +
64684 +#endif
64685 +
64686 return is_vmalloc_addr(x);
64687 }
64688
64689 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64690
64691 if (!pgd_none(*pgd)) {
64692 pud_t *pud = pud_offset(pgd, addr);
64693 +#ifdef CONFIG_X86
64694 + if (!pud_large(*pud))
64695 +#endif
64696 if (!pud_none(*pud)) {
64697 pmd_t *pmd = pmd_offset(pud, addr);
64698 +#ifdef CONFIG_X86
64699 + if (!pmd_large(*pmd))
64700 +#endif
64701 if (!pmd_none(*pmd)) {
64702 pte_t *ptep, pte;
64703
64704 @@ -1296,6 +1336,16 @@ static struct vm_struct *__get_vm_area_n
64705 struct vm_struct *area;
64706
64707 BUG_ON(in_interrupt());
64708 +
64709 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64710 + if (flags & VM_KERNEXEC) {
64711 + if (start != VMALLOC_START || end != VMALLOC_END)
64712 + return NULL;
64713 + start = (unsigned long)MODULES_EXEC_VADDR;
64714 + end = (unsigned long)MODULES_EXEC_END;
64715 + }
64716 +#endif
64717 +
64718 if (flags & VM_IOREMAP) {
64719 int bit = fls(size);
64720
64721 @@ -1514,6 +1564,11 @@ void *vmap(struct page **pages, unsigned
64722 if (count > totalram_pages)
64723 return NULL;
64724
64725 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64726 + if (!(pgprot_val(prot) & _PAGE_NX))
64727 + flags |= VM_KERNEXEC;
64728 +#endif
64729 +
64730 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64731 __builtin_return_address(0));
64732 if (!area)
64733 @@ -1610,6 +1665,13 @@ void *__vmalloc_node_range(unsigned long
64734 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64735 return NULL;
64736
64737 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64738 + if (!(pgprot_val(prot) & _PAGE_NX))
64739 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64740 + node, gfp_mask, caller);
64741 + else
64742 +#endif
64743 +
64744 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64745 gfp_mask, caller);
64746
64747 @@ -1649,6 +1711,7 @@ static void *__vmalloc_node(unsigned lon
64748 gfp_mask, prot, node, caller);
64749 }
64750
64751 +#undef __vmalloc
64752 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64753 {
64754 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64755 @@ -1672,6 +1735,7 @@ static inline void *__vmalloc_node_flags
64756 * For tight control over page level allocator and protection flags
64757 * use __vmalloc() instead.
64758 */
64759 +#undef vmalloc
64760 void *vmalloc(unsigned long size)
64761 {
64762 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64763 @@ -1688,6 +1752,7 @@ EXPORT_SYMBOL(vmalloc);
64764 * For tight control over page level allocator and protection flags
64765 * use __vmalloc() instead.
64766 */
64767 +#undef vzalloc
64768 void *vzalloc(unsigned long size)
64769 {
64770 return __vmalloc_node_flags(size, -1,
64771 @@ -1702,6 +1767,7 @@ EXPORT_SYMBOL(vzalloc);
64772 * The resulting memory area is zeroed so it can be mapped to userspace
64773 * without leaking data.
64774 */
64775 +#undef vmalloc_user
64776 void *vmalloc_user(unsigned long size)
64777 {
64778 struct vm_struct *area;
64779 @@ -1729,6 +1795,7 @@ EXPORT_SYMBOL(vmalloc_user);
64780 * For tight control over page level allocator and protection flags
64781 * use __vmalloc() instead.
64782 */
64783 +#undef vmalloc_node
64784 void *vmalloc_node(unsigned long size, int node)
64785 {
64786 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64787 @@ -1748,6 +1815,7 @@ EXPORT_SYMBOL(vmalloc_node);
64788 * For tight control over page level allocator and protection flags
64789 * use __vmalloc_node() instead.
64790 */
64791 +#undef vzalloc_node
64792 void *vzalloc_node(unsigned long size, int node)
64793 {
64794 return __vmalloc_node_flags(size, node,
64795 @@ -1770,10 +1838,10 @@ EXPORT_SYMBOL(vzalloc_node);
64796 * For tight control over page level allocator and protection flags
64797 * use __vmalloc() instead.
64798 */
64799 -
64800 +#undef vmalloc_exec
64801 void *vmalloc_exec(unsigned long size)
64802 {
64803 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64804 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64805 -1, __builtin_return_address(0));
64806 }
64807
64808 @@ -1792,6 +1860,7 @@ void *vmalloc_exec(unsigned long size)
64809 * Allocate enough 32bit PA addressable pages to cover @size from the
64810 * page level allocator and map them into contiguous kernel virtual space.
64811 */
64812 +#undef vmalloc_32
64813 void *vmalloc_32(unsigned long size)
64814 {
64815 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64816 @@ -1806,6 +1875,7 @@ EXPORT_SYMBOL(vmalloc_32);
64817 * The resulting memory area is 32bit addressable and zeroed so it can be
64818 * mapped to userspace without leaking data.
64819 */
64820 +#undef vmalloc_32_user
64821 void *vmalloc_32_user(unsigned long size)
64822 {
64823 struct vm_struct *area;
64824 @@ -2068,6 +2138,8 @@ int remap_vmalloc_range(struct vm_area_s
64825 unsigned long uaddr = vma->vm_start;
64826 unsigned long usize = vma->vm_end - vma->vm_start;
64827
64828 + BUG_ON(vma->vm_mirror);
64829 +
64830 if ((PAGE_SIZE-1) & (unsigned long)addr)
64831 return -EINVAL;
64832
64833 diff -urNp linux-2.6.39.4/mm/vmstat.c linux-2.6.39.4/mm/vmstat.c
64834 --- linux-2.6.39.4/mm/vmstat.c 2011-05-19 00:06:34.000000000 -0400
64835 +++ linux-2.6.39.4/mm/vmstat.c 2011-08-05 19:44:37.000000000 -0400
64836 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64837 *
64838 * vm_stat contains the global counters
64839 */
64840 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64841 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64842 EXPORT_SYMBOL(vm_stat);
64843
64844 #ifdef CONFIG_SMP
64845 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64846 v = p->vm_stat_diff[i];
64847 p->vm_stat_diff[i] = 0;
64848 local_irq_restore(flags);
64849 - atomic_long_add(v, &zone->vm_stat[i]);
64850 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64851 global_diff[i] += v;
64852 #ifdef CONFIG_NUMA
64853 /* 3 seconds idle till flush */
64854 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64855
64856 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64857 if (global_diff[i])
64858 - atomic_long_add(global_diff[i], &vm_stat[i]);
64859 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64860 }
64861
64862 #endif
64863 @@ -1205,10 +1205,20 @@ static int __init setup_vmstat(void)
64864 start_cpu_timer(cpu);
64865 #endif
64866 #ifdef CONFIG_PROC_FS
64867 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64868 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64869 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64870 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64871 + {
64872 + mode_t gr_mode = S_IRUGO;
64873 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64874 + gr_mode = S_IRUSR;
64875 +#endif
64876 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64877 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64878 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64879 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64880 +#else
64881 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64882 +#endif
64883 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64884 + }
64885 #endif
64886 return 0;
64887 }
64888 diff -urNp linux-2.6.39.4/net/8021q/vlan.c linux-2.6.39.4/net/8021q/vlan.c
64889 --- linux-2.6.39.4/net/8021q/vlan.c 2011-05-19 00:06:34.000000000 -0400
64890 +++ linux-2.6.39.4/net/8021q/vlan.c 2011-08-05 19:44:37.000000000 -0400
64891 @@ -592,8 +592,7 @@ static int vlan_ioctl_handler(struct net
64892 err = -EPERM;
64893 if (!capable(CAP_NET_ADMIN))
64894 break;
64895 - if ((args.u.name_type >= 0) &&
64896 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64897 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64898 struct vlan_net *vn;
64899
64900 vn = net_generic(net, vlan_net_id);
64901 diff -urNp linux-2.6.39.4/net/atm/atm_misc.c linux-2.6.39.4/net/atm/atm_misc.c
64902 --- linux-2.6.39.4/net/atm/atm_misc.c 2011-05-19 00:06:34.000000000 -0400
64903 +++ linux-2.6.39.4/net/atm/atm_misc.c 2011-08-05 19:44:37.000000000 -0400
64904 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64905 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64906 return 1;
64907 atm_return(vcc, truesize);
64908 - atomic_inc(&vcc->stats->rx_drop);
64909 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64910 return 0;
64911 }
64912 EXPORT_SYMBOL(atm_charge);
64913 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
64914 }
64915 }
64916 atm_return(vcc, guess);
64917 - atomic_inc(&vcc->stats->rx_drop);
64918 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64919 return NULL;
64920 }
64921 EXPORT_SYMBOL(atm_alloc_charge);
64922 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
64923
64924 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64925 {
64926 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
64927 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
64928 __SONET_ITEMS
64929 #undef __HANDLE_ITEM
64930 }
64931 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
64932
64933 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
64934 {
64935 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
64936 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
64937 __SONET_ITEMS
64938 #undef __HANDLE_ITEM
64939 }
64940 diff -urNp linux-2.6.39.4/net/atm/lec.h linux-2.6.39.4/net/atm/lec.h
64941 --- linux-2.6.39.4/net/atm/lec.h 2011-05-19 00:06:34.000000000 -0400
64942 +++ linux-2.6.39.4/net/atm/lec.h 2011-08-05 20:34:06.000000000 -0400
64943 @@ -48,7 +48,7 @@ struct lane2_ops {
64944 const u8 *tlvs, u32 sizeoftlvs);
64945 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
64946 const u8 *tlvs, u32 sizeoftlvs);
64947 -};
64948 +} __no_const;
64949
64950 /*
64951 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
64952 diff -urNp linux-2.6.39.4/net/atm/mpc.h linux-2.6.39.4/net/atm/mpc.h
64953 --- linux-2.6.39.4/net/atm/mpc.h 2011-05-19 00:06:34.000000000 -0400
64954 +++ linux-2.6.39.4/net/atm/mpc.h 2011-08-05 20:34:06.000000000 -0400
64955 @@ -33,7 +33,7 @@ struct mpoa_client {
64956 struct mpc_parameters parameters; /* parameters for this client */
64957
64958 const struct net_device_ops *old_ops;
64959 - struct net_device_ops new_ops;
64960 + net_device_ops_no_const new_ops;
64961 };
64962
64963
64964 diff -urNp linux-2.6.39.4/net/atm/mpoa_caches.c linux-2.6.39.4/net/atm/mpoa_caches.c
64965 --- linux-2.6.39.4/net/atm/mpoa_caches.c 2011-05-19 00:06:34.000000000 -0400
64966 +++ linux-2.6.39.4/net/atm/mpoa_caches.c 2011-08-05 19:44:37.000000000 -0400
64967 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
64968 struct timeval now;
64969 struct k_message msg;
64970
64971 + pax_track_stack();
64972 +
64973 do_gettimeofday(&now);
64974
64975 read_lock_bh(&client->ingress_lock);
64976 diff -urNp linux-2.6.39.4/net/atm/proc.c linux-2.6.39.4/net/atm/proc.c
64977 --- linux-2.6.39.4/net/atm/proc.c 2011-05-19 00:06:34.000000000 -0400
64978 +++ linux-2.6.39.4/net/atm/proc.c 2011-08-05 19:44:37.000000000 -0400
64979 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
64980 const struct k_atm_aal_stats *stats)
64981 {
64982 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
64983 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
64984 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
64985 - atomic_read(&stats->rx_drop));
64986 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
64987 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
64988 + atomic_read_unchecked(&stats->rx_drop));
64989 }
64990
64991 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
64992 @@ -191,7 +191,12 @@ static void vcc_info(struct seq_file *se
64993 {
64994 struct sock *sk = sk_atm(vcc);
64995
64996 +#ifdef CONFIG_GRKERNSEC_HIDESYM
64997 + seq_printf(seq, "%p ", NULL);
64998 +#else
64999 seq_printf(seq, "%p ", vcc);
65000 +#endif
65001 +
65002 if (!vcc->dev)
65003 seq_printf(seq, "Unassigned ");
65004 else
65005 @@ -218,7 +223,11 @@ static void svc_info(struct seq_file *se
65006 {
65007 if (!vcc->dev)
65008 seq_printf(seq, sizeof(void *) == 4 ?
65009 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65010 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
65011 +#else
65012 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
65013 +#endif
65014 else
65015 seq_printf(seq, "%3d %3d %5d ",
65016 vcc->dev->number, vcc->vpi, vcc->vci);
65017 diff -urNp linux-2.6.39.4/net/atm/resources.c linux-2.6.39.4/net/atm/resources.c
65018 --- linux-2.6.39.4/net/atm/resources.c 2011-05-19 00:06:34.000000000 -0400
65019 +++ linux-2.6.39.4/net/atm/resources.c 2011-08-05 19:44:37.000000000 -0400
65020 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
65021 static void copy_aal_stats(struct k_atm_aal_stats *from,
65022 struct atm_aal_stats *to)
65023 {
65024 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65025 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65026 __AAL_STAT_ITEMS
65027 #undef __HANDLE_ITEM
65028 }
65029 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
65030 static void subtract_aal_stats(struct k_atm_aal_stats *from,
65031 struct atm_aal_stats *to)
65032 {
65033 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65034 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
65035 __AAL_STAT_ITEMS
65036 #undef __HANDLE_ITEM
65037 }
65038 diff -urNp linux-2.6.39.4/net/batman-adv/hard-interface.c linux-2.6.39.4/net/batman-adv/hard-interface.c
65039 --- linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-05-19 00:06:34.000000000 -0400
65040 +++ linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-08-05 19:44:37.000000000 -0400
65041 @@ -339,8 +339,8 @@ int hardif_enable_interface(struct hard_
65042 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
65043 dev_add_pack(&hard_iface->batman_adv_ptype);
65044
65045 - atomic_set(&hard_iface->seqno, 1);
65046 - atomic_set(&hard_iface->frag_seqno, 1);
65047 + atomic_set_unchecked(&hard_iface->seqno, 1);
65048 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
65049 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
65050 hard_iface->net_dev->name);
65051
65052 diff -urNp linux-2.6.39.4/net/batman-adv/routing.c linux-2.6.39.4/net/batman-adv/routing.c
65053 --- linux-2.6.39.4/net/batman-adv/routing.c 2011-05-19 00:06:34.000000000 -0400
65054 +++ linux-2.6.39.4/net/batman-adv/routing.c 2011-08-05 19:44:37.000000000 -0400
65055 @@ -625,7 +625,7 @@ void receive_bat_packet(struct ethhdr *e
65056 return;
65057
65058 /* could be changed by schedule_own_packet() */
65059 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
65060 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
65061
65062 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
65063
65064 diff -urNp linux-2.6.39.4/net/batman-adv/send.c linux-2.6.39.4/net/batman-adv/send.c
65065 --- linux-2.6.39.4/net/batman-adv/send.c 2011-05-19 00:06:34.000000000 -0400
65066 +++ linux-2.6.39.4/net/batman-adv/send.c 2011-08-05 19:44:37.000000000 -0400
65067 @@ -277,7 +277,7 @@ void schedule_own_packet(struct hard_ifa
65068
65069 /* change sequence number to network order */
65070 batman_packet->seqno =
65071 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
65072 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
65073
65074 if (vis_server == VIS_TYPE_SERVER_SYNC)
65075 batman_packet->flags |= VIS_SERVER;
65076 @@ -291,7 +291,7 @@ void schedule_own_packet(struct hard_ifa
65077 else
65078 batman_packet->gw_flags = 0;
65079
65080 - atomic_inc(&hard_iface->seqno);
65081 + atomic_inc_unchecked(&hard_iface->seqno);
65082
65083 slide_own_bcast_window(hard_iface);
65084 send_time = own_send_time(bat_priv);
65085 diff -urNp linux-2.6.39.4/net/batman-adv/soft-interface.c linux-2.6.39.4/net/batman-adv/soft-interface.c
65086 --- linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-05-19 00:06:34.000000000 -0400
65087 +++ linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-08-05 19:44:37.000000000 -0400
65088 @@ -386,7 +386,7 @@ int interface_tx(struct sk_buff *skb, st
65089
65090 /* set broadcast sequence number */
65091 bcast_packet->seqno =
65092 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
65093 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
65094
65095 add_bcast_packet_to_list(bat_priv, skb);
65096
65097 @@ -579,7 +579,7 @@ struct net_device *softif_create(char *n
65098 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
65099
65100 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
65101 - atomic_set(&bat_priv->bcast_seqno, 1);
65102 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
65103 atomic_set(&bat_priv->hna_local_changed, 0);
65104
65105 bat_priv->primary_if = NULL;
65106 diff -urNp linux-2.6.39.4/net/batman-adv/types.h linux-2.6.39.4/net/batman-adv/types.h
65107 --- linux-2.6.39.4/net/batman-adv/types.h 2011-05-19 00:06:34.000000000 -0400
65108 +++ linux-2.6.39.4/net/batman-adv/types.h 2011-08-05 19:44:37.000000000 -0400
65109 @@ -38,8 +38,8 @@ struct hard_iface {
65110 int16_t if_num;
65111 char if_status;
65112 struct net_device *net_dev;
65113 - atomic_t seqno;
65114 - atomic_t frag_seqno;
65115 + atomic_unchecked_t seqno;
65116 + atomic_unchecked_t frag_seqno;
65117 unsigned char *packet_buff;
65118 int packet_len;
65119 struct kobject *hardif_obj;
65120 @@ -141,7 +141,7 @@ struct bat_priv {
65121 atomic_t orig_interval; /* uint */
65122 atomic_t hop_penalty; /* uint */
65123 atomic_t log_level; /* uint */
65124 - atomic_t bcast_seqno;
65125 + atomic_unchecked_t bcast_seqno;
65126 atomic_t bcast_queue_left;
65127 atomic_t batman_queue_left;
65128 char num_ifaces;
65129 diff -urNp linux-2.6.39.4/net/batman-adv/unicast.c linux-2.6.39.4/net/batman-adv/unicast.c
65130 --- linux-2.6.39.4/net/batman-adv/unicast.c 2011-05-19 00:06:34.000000000 -0400
65131 +++ linux-2.6.39.4/net/batman-adv/unicast.c 2011-08-05 19:44:37.000000000 -0400
65132 @@ -263,7 +263,7 @@ int frag_send_skb(struct sk_buff *skb, s
65133 frag1->flags = UNI_FRAG_HEAD | large_tail;
65134 frag2->flags = large_tail;
65135
65136 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
65137 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
65138 frag1->seqno = htons(seqno - 1);
65139 frag2->seqno = htons(seqno);
65140
65141 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_core.c linux-2.6.39.4/net/bluetooth/l2cap_core.c
65142 --- linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-05-19 00:06:34.000000000 -0400
65143 +++ linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-08-05 19:44:37.000000000 -0400
65144 @@ -2202,7 +2202,7 @@ static inline int l2cap_config_req(struc
65145
65146 /* Reject if config buffer is too small. */
65147 len = cmd_len - sizeof(*req);
65148 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65149 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65150 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
65151 l2cap_build_conf_rsp(sk, rsp,
65152 L2CAP_CONF_REJECT, flags), rsp);
65153 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_sock.c linux-2.6.39.4/net/bluetooth/l2cap_sock.c
65154 --- linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-05-19 00:06:34.000000000 -0400
65155 +++ linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-08-05 19:44:37.000000000 -0400
65156 @@ -446,6 +446,7 @@ static int l2cap_sock_getsockopt_old(str
65157 break;
65158 }
65159
65160 + memset(&cinfo, 0, sizeof(cinfo));
65161 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
65162 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
65163
65164 diff -urNp linux-2.6.39.4/net/bluetooth/rfcomm/sock.c linux-2.6.39.4/net/bluetooth/rfcomm/sock.c
65165 --- linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-05-19 00:06:34.000000000 -0400
65166 +++ linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-08-05 19:44:37.000000000 -0400
65167 @@ -787,6 +787,7 @@ static int rfcomm_sock_getsockopt_old(st
65168
65169 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
65170
65171 + memset(&cinfo, 0, sizeof(cinfo));
65172 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
65173 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
65174
65175 diff -urNp linux-2.6.39.4/net/bridge/br_multicast.c linux-2.6.39.4/net/bridge/br_multicast.c
65176 --- linux-2.6.39.4/net/bridge/br_multicast.c 2011-05-19 00:06:34.000000000 -0400
65177 +++ linux-2.6.39.4/net/bridge/br_multicast.c 2011-08-05 19:44:37.000000000 -0400
65178 @@ -1482,7 +1482,7 @@ static int br_multicast_ipv6_rcv(struct
65179 nexthdr = ip6h->nexthdr;
65180 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
65181
65182 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
65183 + if (nexthdr != IPPROTO_ICMPV6)
65184 return 0;
65185
65186 /* Okay, we found ICMPv6 header */
65187 diff -urNp linux-2.6.39.4/net/bridge/netfilter/ebtables.c linux-2.6.39.4/net/bridge/netfilter/ebtables.c
65188 --- linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-05-19 00:06:34.000000000 -0400
65189 +++ linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-08-05 19:44:37.000000000 -0400
65190 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
65191 tmp.valid_hooks = t->table->valid_hooks;
65192 }
65193 mutex_unlock(&ebt_mutex);
65194 - if (copy_to_user(user, &tmp, *len) != 0){
65195 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
65196 BUGPRINT("c2u Didn't work\n");
65197 ret = -EFAULT;
65198 break;
65199 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
65200 int ret;
65201 void __user *pos;
65202
65203 + pax_track_stack();
65204 +
65205 memset(&tinfo, 0, sizeof(tinfo));
65206
65207 if (cmd == EBT_SO_GET_ENTRIES) {
65208 diff -urNp linux-2.6.39.4/net/caif/caif_socket.c linux-2.6.39.4/net/caif/caif_socket.c
65209 --- linux-2.6.39.4/net/caif/caif_socket.c 2011-05-19 00:06:34.000000000 -0400
65210 +++ linux-2.6.39.4/net/caif/caif_socket.c 2011-08-05 19:44:37.000000000 -0400
65211 @@ -48,18 +48,19 @@ static struct dentry *debugfsdir;
65212 #ifdef CONFIG_DEBUG_FS
65213 struct debug_fs_counter {
65214 atomic_t caif_nr_socks;
65215 - atomic_t num_connect_req;
65216 - atomic_t num_connect_resp;
65217 - atomic_t num_connect_fail_resp;
65218 - atomic_t num_disconnect;
65219 - atomic_t num_remote_shutdown_ind;
65220 - atomic_t num_tx_flow_off_ind;
65221 - atomic_t num_tx_flow_on_ind;
65222 - atomic_t num_rx_flow_off;
65223 - atomic_t num_rx_flow_on;
65224 + atomic_unchecked_t num_connect_req;
65225 + atomic_unchecked_t num_connect_resp;
65226 + atomic_unchecked_t num_connect_fail_resp;
65227 + atomic_unchecked_t num_disconnect;
65228 + atomic_unchecked_t num_remote_shutdown_ind;
65229 + atomic_unchecked_t num_tx_flow_off_ind;
65230 + atomic_unchecked_t num_tx_flow_on_ind;
65231 + atomic_unchecked_t num_rx_flow_off;
65232 + atomic_unchecked_t num_rx_flow_on;
65233 };
65234 static struct debug_fs_counter cnt;
65235 #define dbfs_atomic_inc(v) atomic_inc(v)
65236 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_unchecked(v)
65237 #define dbfs_atomic_dec(v) atomic_dec(v)
65238 #else
65239 #define dbfs_atomic_inc(v)
65240 @@ -159,7 +160,7 @@ static int caif_queue_rcv_skb(struct soc
65241 atomic_read(&cf_sk->sk.sk_rmem_alloc),
65242 sk_rcvbuf_lowwater(cf_sk));
65243 set_rx_flow_off(cf_sk);
65244 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65245 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65246 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65247 }
65248
65249 @@ -169,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
65250 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
65251 set_rx_flow_off(cf_sk);
65252 pr_debug("sending flow OFF due to rmem_schedule\n");
65253 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65254 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65255 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65256 }
65257 skb->dev = NULL;
65258 @@ -218,21 +219,21 @@ static void caif_ctrl_cb(struct cflayer
65259 switch (flow) {
65260 case CAIF_CTRLCMD_FLOW_ON_IND:
65261 /* OK from modem to start sending again */
65262 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
65263 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
65264 set_tx_flow_on(cf_sk);
65265 cf_sk->sk.sk_state_change(&cf_sk->sk);
65266 break;
65267
65268 case CAIF_CTRLCMD_FLOW_OFF_IND:
65269 /* Modem asks us to shut up */
65270 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
65271 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
65272 set_tx_flow_off(cf_sk);
65273 cf_sk->sk.sk_state_change(&cf_sk->sk);
65274 break;
65275
65276 case CAIF_CTRLCMD_INIT_RSP:
65277 /* We're now connected */
65278 - dbfs_atomic_inc(&cnt.num_connect_resp);
65279 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
65280 cf_sk->sk.sk_state = CAIF_CONNECTED;
65281 set_tx_flow_on(cf_sk);
65282 cf_sk->sk.sk_state_change(&cf_sk->sk);
65283 @@ -247,7 +248,7 @@ static void caif_ctrl_cb(struct cflayer
65284
65285 case CAIF_CTRLCMD_INIT_FAIL_RSP:
65286 /* Connect request failed */
65287 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
65288 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
65289 cf_sk->sk.sk_err = ECONNREFUSED;
65290 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
65291 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65292 @@ -261,7 +262,7 @@ static void caif_ctrl_cb(struct cflayer
65293
65294 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
65295 /* Modem has closed this connection, or device is down. */
65296 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
65297 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
65298 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65299 cf_sk->sk.sk_err = ECONNRESET;
65300 set_rx_flow_on(cf_sk);
65301 @@ -281,7 +282,7 @@ static void caif_check_flow_release(stru
65302 return;
65303
65304 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
65305 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
65306 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
65307 set_rx_flow_on(cf_sk);
65308 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
65309 }
65310 @@ -864,7 +865,7 @@ static int caif_connect(struct socket *s
65311 /*ifindex = id of the interface.*/
65312 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
65313
65314 - dbfs_atomic_inc(&cnt.num_connect_req);
65315 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
65316 cf_sk->layer.receive = caif_sktrecv_cb;
65317 err = caif_connect_client(&cf_sk->conn_req,
65318 &cf_sk->layer, &ifindex, &headroom, &tailroom);
65319 @@ -952,7 +953,7 @@ static int caif_release(struct socket *s
65320 spin_unlock(&sk->sk_receive_queue.lock);
65321 sock->sk = NULL;
65322
65323 - dbfs_atomic_inc(&cnt.num_disconnect);
65324 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65325
65326 if (cf_sk->debugfs_socket_dir != NULL)
65327 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
65328 diff -urNp linux-2.6.39.4/net/caif/cfctrl.c linux-2.6.39.4/net/caif/cfctrl.c
65329 --- linux-2.6.39.4/net/caif/cfctrl.c 2011-05-19 00:06:34.000000000 -0400
65330 +++ linux-2.6.39.4/net/caif/cfctrl.c 2011-08-05 19:44:37.000000000 -0400
65331 @@ -9,6 +9,7 @@
65332 #include <linux/stddef.h>
65333 #include <linux/spinlock.h>
65334 #include <linux/slab.h>
65335 +#include <linux/sched.h>
65336 #include <net/caif/caif_layer.h>
65337 #include <net/caif/cfpkt.h>
65338 #include <net/caif/cfctrl.h>
65339 @@ -46,8 +47,8 @@ struct cflayer *cfctrl_create(void)
65340 dev_info.id = 0xff;
65341 memset(this, 0, sizeof(*this));
65342 cfsrvl_init(&this->serv, 0, &dev_info, false);
65343 - atomic_set(&this->req_seq_no, 1);
65344 - atomic_set(&this->rsp_seq_no, 1);
65345 + atomic_set_unchecked(&this->req_seq_no, 1);
65346 + atomic_set_unchecked(&this->rsp_seq_no, 1);
65347 this->serv.layer.receive = cfctrl_recv;
65348 sprintf(this->serv.layer.name, "ctrl");
65349 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65350 @@ -116,8 +117,8 @@ void cfctrl_insert_req(struct cfctrl *ct
65351 struct cfctrl_request_info *req)
65352 {
65353 spin_lock(&ctrl->info_list_lock);
65354 - atomic_inc(&ctrl->req_seq_no);
65355 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
65356 + atomic_inc_unchecked(&ctrl->req_seq_no);
65357 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65358 list_add_tail(&req->list, &ctrl->list);
65359 spin_unlock(&ctrl->info_list_lock);
65360 }
65361 @@ -136,7 +137,7 @@ struct cfctrl_request_info *cfctrl_remov
65362 if (p != first)
65363 pr_warn("Requests are not received in order\n");
65364
65365 - atomic_set(&ctrl->rsp_seq_no,
65366 + atomic_set_unchecked(&ctrl->rsp_seq_no,
65367 p->sequence_no);
65368 list_del(&p->list);
65369 goto out;
65370 @@ -385,6 +386,7 @@ static int cfctrl_recv(struct cflayer *l
65371 struct cfctrl *cfctrl = container_obj(layer);
65372 struct cfctrl_request_info rsp, *req;
65373
65374 + pax_track_stack();
65375
65376 cfpkt_extr_head(pkt, &cmdrsp, 1);
65377 cmd = cmdrsp & CFCTRL_CMD_MASK;
65378 diff -urNp linux-2.6.39.4/net/can/bcm.c linux-2.6.39.4/net/can/bcm.c
65379 --- linux-2.6.39.4/net/can/bcm.c 2011-05-19 00:06:34.000000000 -0400
65380 +++ linux-2.6.39.4/net/can/bcm.c 2011-08-05 19:44:37.000000000 -0400
65381 @@ -165,9 +165,15 @@ static int bcm_proc_show(struct seq_file
65382 struct bcm_sock *bo = bcm_sk(sk);
65383 struct bcm_op *op;
65384
65385 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65386 + seq_printf(m, ">>> socket %p", NULL);
65387 + seq_printf(m, " / sk %p", NULL);
65388 + seq_printf(m, " / bo %p", NULL);
65389 +#else
65390 seq_printf(m, ">>> socket %p", sk->sk_socket);
65391 seq_printf(m, " / sk %p", sk);
65392 seq_printf(m, " / bo %p", bo);
65393 +#endif
65394 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
65395 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
65396 seq_printf(m, " <<<\n");
65397 diff -urNp linux-2.6.39.4/net/core/datagram.c linux-2.6.39.4/net/core/datagram.c
65398 --- linux-2.6.39.4/net/core/datagram.c 2011-05-19 00:06:34.000000000 -0400
65399 +++ linux-2.6.39.4/net/core/datagram.c 2011-08-05 19:44:37.000000000 -0400
65400 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65401 }
65402
65403 kfree_skb(skb);
65404 - atomic_inc(&sk->sk_drops);
65405 + atomic_inc_unchecked(&sk->sk_drops);
65406 sk_mem_reclaim_partial(sk);
65407
65408 return err;
65409 diff -urNp linux-2.6.39.4/net/core/dev.c linux-2.6.39.4/net/core/dev.c
65410 --- linux-2.6.39.4/net/core/dev.c 2011-06-03 00:04:14.000000000 -0400
65411 +++ linux-2.6.39.4/net/core/dev.c 2011-08-05 20:34:06.000000000 -0400
65412 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65413 if (no_module && capable(CAP_NET_ADMIN))
65414 no_module = request_module("netdev-%s", name);
65415 if (no_module && capable(CAP_SYS_MODULE)) {
65416 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65417 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
65418 +#else
65419 if (!request_module("%s", name))
65420 pr_err("Loading kernel module for a network device "
65421 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65422 "instead\n", name);
65423 +#endif
65424 }
65425 }
65426 EXPORT_SYMBOL(dev_load);
65427 @@ -1951,7 +1955,7 @@ static int illegal_highdma(struct net_de
65428
65429 struct dev_gso_cb {
65430 void (*destructor)(struct sk_buff *skb);
65431 -};
65432 +} __no_const;
65433
65434 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65435
65436 @@ -2901,7 +2905,7 @@ int netif_rx_ni(struct sk_buff *skb)
65437 }
65438 EXPORT_SYMBOL(netif_rx_ni);
65439
65440 -static void net_tx_action(struct softirq_action *h)
65441 +static void net_tx_action(void)
65442 {
65443 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65444
65445 @@ -3765,7 +3769,7 @@ void netif_napi_del(struct napi_struct *
65446 }
65447 EXPORT_SYMBOL(netif_napi_del);
65448
65449 -static void net_rx_action(struct softirq_action *h)
65450 +static void net_rx_action(void)
65451 {
65452 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65453 unsigned long time_limit = jiffies + 2;
65454 diff -urNp linux-2.6.39.4/net/core/flow.c linux-2.6.39.4/net/core/flow.c
65455 --- linux-2.6.39.4/net/core/flow.c 2011-05-19 00:06:34.000000000 -0400
65456 +++ linux-2.6.39.4/net/core/flow.c 2011-08-05 19:44:37.000000000 -0400
65457 @@ -60,7 +60,7 @@ struct flow_cache {
65458 struct timer_list rnd_timer;
65459 };
65460
65461 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
65462 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65463 EXPORT_SYMBOL(flow_cache_genid);
65464 static struct flow_cache flow_cache_global;
65465 static struct kmem_cache *flow_cachep __read_mostly;
65466 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65467
65468 static int flow_entry_valid(struct flow_cache_entry *fle)
65469 {
65470 - if (atomic_read(&flow_cache_genid) != fle->genid)
65471 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65472 return 0;
65473 if (fle->object && !fle->object->ops->check(fle->object))
65474 return 0;
65475 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65476 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65477 fcp->hash_count++;
65478 }
65479 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65480 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65481 flo = fle->object;
65482 if (!flo)
65483 goto ret_object;
65484 @@ -274,7 +274,7 @@ nocache:
65485 }
65486 flo = resolver(net, key, family, dir, flo, ctx);
65487 if (fle) {
65488 - fle->genid = atomic_read(&flow_cache_genid);
65489 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
65490 if (!IS_ERR(flo))
65491 fle->object = flo;
65492 else
65493 diff -urNp linux-2.6.39.4/net/core/rtnetlink.c linux-2.6.39.4/net/core/rtnetlink.c
65494 --- linux-2.6.39.4/net/core/rtnetlink.c 2011-05-19 00:06:34.000000000 -0400
65495 +++ linux-2.6.39.4/net/core/rtnetlink.c 2011-08-05 20:34:06.000000000 -0400
65496 @@ -56,7 +56,7 @@
65497 struct rtnl_link {
65498 rtnl_doit_func doit;
65499 rtnl_dumpit_func dumpit;
65500 -};
65501 +} __no_const;
65502
65503 static DEFINE_MUTEX(rtnl_mutex);
65504
65505 diff -urNp linux-2.6.39.4/net/core/skbuff.c linux-2.6.39.4/net/core/skbuff.c
65506 --- linux-2.6.39.4/net/core/skbuff.c 2011-06-03 00:04:14.000000000 -0400
65507 +++ linux-2.6.39.4/net/core/skbuff.c 2011-08-05 19:44:37.000000000 -0400
65508 @@ -1542,6 +1542,8 @@ int skb_splice_bits(struct sk_buff *skb,
65509 struct sock *sk = skb->sk;
65510 int ret = 0;
65511
65512 + pax_track_stack();
65513 +
65514 if (splice_grow_spd(pipe, &spd))
65515 return -ENOMEM;
65516
65517 diff -urNp linux-2.6.39.4/net/core/sock.c linux-2.6.39.4/net/core/sock.c
65518 --- linux-2.6.39.4/net/core/sock.c 2011-05-19 00:06:34.000000000 -0400
65519 +++ linux-2.6.39.4/net/core/sock.c 2011-08-05 19:44:37.000000000 -0400
65520 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65521 */
65522 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65523 (unsigned)sk->sk_rcvbuf) {
65524 - atomic_inc(&sk->sk_drops);
65525 + atomic_inc_unchecked(&sk->sk_drops);
65526 return -ENOMEM;
65527 }
65528
65529 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65530 return err;
65531
65532 if (!sk_rmem_schedule(sk, skb->truesize)) {
65533 - atomic_inc(&sk->sk_drops);
65534 + atomic_inc_unchecked(&sk->sk_drops);
65535 return -ENOBUFS;
65536 }
65537
65538 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65539 skb_dst_force(skb);
65540
65541 spin_lock_irqsave(&list->lock, flags);
65542 - skb->dropcount = atomic_read(&sk->sk_drops);
65543 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65544 __skb_queue_tail(list, skb);
65545 spin_unlock_irqrestore(&list->lock, flags);
65546
65547 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65548 skb->dev = NULL;
65549
65550 if (sk_rcvqueues_full(sk, skb)) {
65551 - atomic_inc(&sk->sk_drops);
65552 + atomic_inc_unchecked(&sk->sk_drops);
65553 goto discard_and_relse;
65554 }
65555 if (nested)
65556 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65557 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65558 } else if (sk_add_backlog(sk, skb)) {
65559 bh_unlock_sock(sk);
65560 - atomic_inc(&sk->sk_drops);
65561 + atomic_inc_unchecked(&sk->sk_drops);
65562 goto discard_and_relse;
65563 }
65564
65565 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65566 return -ENOTCONN;
65567 if (lv < len)
65568 return -EINVAL;
65569 - if (copy_to_user(optval, address, len))
65570 + if (len > sizeof(address) || copy_to_user(optval, address, len))
65571 return -EFAULT;
65572 goto lenout;
65573 }
65574 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65575
65576 if (len > lv)
65577 len = lv;
65578 - if (copy_to_user(optval, &v, len))
65579 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
65580 return -EFAULT;
65581 lenout:
65582 if (put_user(len, optlen))
65583 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65584 */
65585 smp_wmb();
65586 atomic_set(&sk->sk_refcnt, 1);
65587 - atomic_set(&sk->sk_drops, 0);
65588 + atomic_set_unchecked(&sk->sk_drops, 0);
65589 }
65590 EXPORT_SYMBOL(sock_init_data);
65591
65592 diff -urNp linux-2.6.39.4/net/decnet/sysctl_net_decnet.c linux-2.6.39.4/net/decnet/sysctl_net_decnet.c
65593 --- linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-05-19 00:06:34.000000000 -0400
65594 +++ linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-08-05 19:44:37.000000000 -0400
65595 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65596
65597 if (len > *lenp) len = *lenp;
65598
65599 - if (copy_to_user(buffer, addr, len))
65600 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
65601 return -EFAULT;
65602
65603 *lenp = len;
65604 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65605
65606 if (len > *lenp) len = *lenp;
65607
65608 - if (copy_to_user(buffer, devname, len))
65609 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
65610 return -EFAULT;
65611
65612 *lenp = len;
65613 diff -urNp linux-2.6.39.4/net/econet/Kconfig linux-2.6.39.4/net/econet/Kconfig
65614 --- linux-2.6.39.4/net/econet/Kconfig 2011-05-19 00:06:34.000000000 -0400
65615 +++ linux-2.6.39.4/net/econet/Kconfig 2011-08-05 19:44:37.000000000 -0400
65616 @@ -4,7 +4,7 @@
65617
65618 config ECONET
65619 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65620 - depends on EXPERIMENTAL && INET
65621 + depends on EXPERIMENTAL && INET && BROKEN
65622 ---help---
65623 Econet is a fairly old and slow networking protocol mainly used by
65624 Acorn computers to access file and print servers. It uses native
65625 diff -urNp linux-2.6.39.4/net/ipv4/fib_frontend.c linux-2.6.39.4/net/ipv4/fib_frontend.c
65626 --- linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-05-19 00:06:34.000000000 -0400
65627 +++ linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-08-05 19:44:37.000000000 -0400
65628 @@ -968,12 +968,12 @@ static int fib_inetaddr_event(struct not
65629 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65630 fib_sync_up(dev);
65631 #endif
65632 - atomic_inc(&net->ipv4.dev_addr_genid);
65633 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65634 rt_cache_flush(dev_net(dev), -1);
65635 break;
65636 case NETDEV_DOWN:
65637 fib_del_ifaddr(ifa, NULL);
65638 - atomic_inc(&net->ipv4.dev_addr_genid);
65639 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65640 if (ifa->ifa_dev->ifa_list == NULL) {
65641 /* Last address was deleted from this interface.
65642 * Disable IP.
65643 @@ -1009,7 +1009,7 @@ static int fib_netdev_event(struct notif
65644 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65645 fib_sync_up(dev);
65646 #endif
65647 - atomic_inc(&net->ipv4.dev_addr_genid);
65648 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65649 rt_cache_flush(dev_net(dev), -1);
65650 break;
65651 case NETDEV_DOWN:
65652 diff -urNp linux-2.6.39.4/net/ipv4/fib_semantics.c linux-2.6.39.4/net/ipv4/fib_semantics.c
65653 --- linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-05-19 00:06:34.000000000 -0400
65654 +++ linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-08-05 19:44:37.000000000 -0400
65655 @@ -701,7 +701,7 @@ __be32 fib_info_update_nh_saddr(struct n
65656 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65657 nh->nh_gw,
65658 nh->nh_parent->fib_scope);
65659 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65660 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65661
65662 return nh->nh_saddr;
65663 }
65664 diff -urNp linux-2.6.39.4/net/ipv4/inet_diag.c linux-2.6.39.4/net/ipv4/inet_diag.c
65665 --- linux-2.6.39.4/net/ipv4/inet_diag.c 2011-07-09 09:18:51.000000000 -0400
65666 +++ linux-2.6.39.4/net/ipv4/inet_diag.c 2011-08-05 19:44:37.000000000 -0400
65667 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65668 r->idiag_retrans = 0;
65669
65670 r->id.idiag_if = sk->sk_bound_dev_if;
65671 +
65672 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65673 + r->id.idiag_cookie[0] = 0;
65674 + r->id.idiag_cookie[1] = 0;
65675 +#else
65676 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65677 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65678 +#endif
65679
65680 r->id.idiag_sport = inet->inet_sport;
65681 r->id.idiag_dport = inet->inet_dport;
65682 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65683 r->idiag_family = tw->tw_family;
65684 r->idiag_retrans = 0;
65685 r->id.idiag_if = tw->tw_bound_dev_if;
65686 +
65687 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65688 + r->id.idiag_cookie[0] = 0;
65689 + r->id.idiag_cookie[1] = 0;
65690 +#else
65691 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65692 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65693 +#endif
65694 +
65695 r->id.idiag_sport = tw->tw_sport;
65696 r->id.idiag_dport = tw->tw_dport;
65697 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65698 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65699 if (sk == NULL)
65700 goto unlock;
65701
65702 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65703 err = -ESTALE;
65704 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65705 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65706 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65707 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65708 goto out;
65709 +#endif
65710
65711 err = -ENOMEM;
65712 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65713 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65714 r->idiag_retrans = req->retrans;
65715
65716 r->id.idiag_if = sk->sk_bound_dev_if;
65717 +
65718 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65719 + r->id.idiag_cookie[0] = 0;
65720 + r->id.idiag_cookie[1] = 0;
65721 +#else
65722 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65723 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65724 +#endif
65725
65726 tmo = req->expires - jiffies;
65727 if (tmo < 0)
65728 diff -urNp linux-2.6.39.4/net/ipv4/inet_hashtables.c linux-2.6.39.4/net/ipv4/inet_hashtables.c
65729 --- linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-05-19 00:06:34.000000000 -0400
65730 +++ linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-08-05 19:44:37.000000000 -0400
65731 @@ -18,11 +18,14 @@
65732 #include <linux/sched.h>
65733 #include <linux/slab.h>
65734 #include <linux/wait.h>
65735 +#include <linux/security.h>
65736
65737 #include <net/inet_connection_sock.h>
65738 #include <net/inet_hashtables.h>
65739 #include <net/ip.h>
65740
65741 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65742 +
65743 /*
65744 * Allocate and initialize a new local port bind bucket.
65745 * The bindhash mutex for snum's hash chain must be held here.
65746 @@ -529,6 +532,8 @@ ok:
65747 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65748 spin_unlock(&head->lock);
65749
65750 + gr_update_task_in_ip_table(current, inet_sk(sk));
65751 +
65752 if (tw) {
65753 inet_twsk_deschedule(tw, death_row);
65754 while (twrefcnt) {
65755 diff -urNp linux-2.6.39.4/net/ipv4/inetpeer.c linux-2.6.39.4/net/ipv4/inetpeer.c
65756 --- linux-2.6.39.4/net/ipv4/inetpeer.c 2011-07-09 09:18:51.000000000 -0400
65757 +++ linux-2.6.39.4/net/ipv4/inetpeer.c 2011-08-05 19:44:37.000000000 -0400
65758 @@ -480,6 +480,8 @@ struct inet_peer *inet_getpeer(struct in
65759 unsigned int sequence;
65760 int invalidated, newrefcnt = 0;
65761
65762 + pax_track_stack();
65763 +
65764 /* Look up for the address quickly, lockless.
65765 * Because of a concurrent writer, we might not find an existing entry.
65766 */
65767 @@ -516,8 +518,8 @@ found: /* The existing node has been fo
65768 if (p) {
65769 p->daddr = *daddr;
65770 atomic_set(&p->refcnt, 1);
65771 - atomic_set(&p->rid, 0);
65772 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65773 + atomic_set_unchecked(&p->rid, 0);
65774 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65775 p->tcp_ts_stamp = 0;
65776 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65777 p->rate_tokens = 0;
65778 diff -urNp linux-2.6.39.4/net/ipv4/ip_fragment.c linux-2.6.39.4/net/ipv4/ip_fragment.c
65779 --- linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-05-19 00:06:34.000000000 -0400
65780 +++ linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-08-05 19:44:37.000000000 -0400
65781 @@ -297,7 +297,7 @@ static inline int ip_frag_too_far(struct
65782 return 0;
65783
65784 start = qp->rid;
65785 - end = atomic_inc_return(&peer->rid);
65786 + end = atomic_inc_return_unchecked(&peer->rid);
65787 qp->rid = end;
65788
65789 rc = qp->q.fragments && (end - start) > max;
65790 diff -urNp linux-2.6.39.4/net/ipv4/ip_sockglue.c linux-2.6.39.4/net/ipv4/ip_sockglue.c
65791 --- linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-05-19 00:06:34.000000000 -0400
65792 +++ linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-08-05 19:44:37.000000000 -0400
65793 @@ -1064,6 +1064,8 @@ static int do_ip_getsockopt(struct sock
65794 int val;
65795 int len;
65796
65797 + pax_track_stack();
65798 +
65799 if (level != SOL_IP)
65800 return -EOPNOTSUPP;
65801
65802 diff -urNp linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
65803 --- linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-19 00:06:34.000000000 -0400
65804 +++ linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-05 19:44:37.000000000 -0400
65805 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65806
65807 *len = 0;
65808
65809 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65810 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65811 if (*octets == NULL) {
65812 if (net_ratelimit())
65813 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65814 diff -urNp linux-2.6.39.4/net/ipv4/raw.c linux-2.6.39.4/net/ipv4/raw.c
65815 --- linux-2.6.39.4/net/ipv4/raw.c 2011-05-19 00:06:34.000000000 -0400
65816 +++ linux-2.6.39.4/net/ipv4/raw.c 2011-08-05 19:44:37.000000000 -0400
65817 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65818 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65819 {
65820 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65821 - atomic_inc(&sk->sk_drops);
65822 + atomic_inc_unchecked(&sk->sk_drops);
65823 kfree_skb(skb);
65824 return NET_RX_DROP;
65825 }
65826 @@ -730,15 +730,19 @@ static int raw_init(struct sock *sk)
65827
65828 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65829 {
65830 + struct icmp_filter filter;
65831 +
65832 if (optlen > sizeof(struct icmp_filter))
65833 optlen = sizeof(struct icmp_filter);
65834 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65835 + if (copy_from_user(&filter, optval, optlen))
65836 return -EFAULT;
65837 + memcpy(&raw_sk(sk)->filter, &filter, sizeof(filter));
65838 return 0;
65839 }
65840
65841 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65842 {
65843 + struct icmp_filter filter;
65844 int len, ret = -EFAULT;
65845
65846 if (get_user(len, optlen))
65847 @@ -749,8 +753,9 @@ static int raw_geticmpfilter(struct sock
65848 if (len > sizeof(struct icmp_filter))
65849 len = sizeof(struct icmp_filter);
65850 ret = -EFAULT;
65851 + memcpy(&filter, &raw_sk(sk)->filter, len);
65852 if (put_user(len, optlen) ||
65853 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65854 + copy_to_user(optval, &filter, len))
65855 goto out;
65856 ret = 0;
65857 out: return ret;
65858 @@ -978,7 +983,13 @@ static void raw_sock_seq_show(struct seq
65859 sk_wmem_alloc_get(sp),
65860 sk_rmem_alloc_get(sp),
65861 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65862 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65863 + atomic_read(&sp->sk_refcnt),
65864 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65865 + NULL,
65866 +#else
65867 + sp,
65868 +#endif
65869 + atomic_read_unchecked(&sp->sk_drops));
65870 }
65871
65872 static int raw_seq_show(struct seq_file *seq, void *v)
65873 diff -urNp linux-2.6.39.4/net/ipv4/route.c linux-2.6.39.4/net/ipv4/route.c
65874 --- linux-2.6.39.4/net/ipv4/route.c 2011-07-09 09:18:51.000000000 -0400
65875 +++ linux-2.6.39.4/net/ipv4/route.c 2011-08-05 19:44:37.000000000 -0400
65876 @@ -303,7 +303,7 @@ static inline unsigned int rt_hash(__be3
65877
65878 static inline int rt_genid(struct net *net)
65879 {
65880 - return atomic_read(&net->ipv4.rt_genid);
65881 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65882 }
65883
65884 #ifdef CONFIG_PROC_FS
65885 @@ -831,7 +831,7 @@ static void rt_cache_invalidate(struct n
65886 unsigned char shuffle;
65887
65888 get_random_bytes(&shuffle, sizeof(shuffle));
65889 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65890 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65891 }
65892
65893 /*
65894 @@ -2833,7 +2833,7 @@ static int rt_fill_info(struct net *net,
65895 rt->peer->pmtu_expires - jiffies : 0;
65896 if (rt->peer) {
65897 inet_peer_refcheck(rt->peer);
65898 - id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
65899 + id = atomic_read_unchecked(&rt->peer->ip_id_count) & 0xffff;
65900 if (rt->peer->tcp_ts_stamp) {
65901 ts = rt->peer->tcp_ts;
65902 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
65903 diff -urNp linux-2.6.39.4/net/ipv4/tcp.c linux-2.6.39.4/net/ipv4/tcp.c
65904 --- linux-2.6.39.4/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
65905 +++ linux-2.6.39.4/net/ipv4/tcp.c 2011-08-05 19:44:37.000000000 -0400
65906 @@ -2121,6 +2121,8 @@ static int do_tcp_setsockopt(struct sock
65907 int val;
65908 int err = 0;
65909
65910 + pax_track_stack();
65911 +
65912 /* These are data/string values, all the others are ints */
65913 switch (optname) {
65914 case TCP_CONGESTION: {
65915 @@ -2500,6 +2502,8 @@ static int do_tcp_getsockopt(struct sock
65916 struct tcp_sock *tp = tcp_sk(sk);
65917 int val, len;
65918
65919 + pax_track_stack();
65920 +
65921 if (get_user(len, optlen))
65922 return -EFAULT;
65923
65924 diff -urNp linux-2.6.39.4/net/ipv4/tcp_ipv4.c linux-2.6.39.4/net/ipv4/tcp_ipv4.c
65925 --- linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-05-19 00:06:34.000000000 -0400
65926 +++ linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-08-05 19:44:37.000000000 -0400
65927 @@ -86,6 +86,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
65928 int sysctl_tcp_low_latency __read_mostly;
65929 EXPORT_SYMBOL(sysctl_tcp_low_latency);
65930
65931 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65932 +extern int grsec_enable_blackhole;
65933 +#endif
65934
65935 #ifdef CONFIG_TCP_MD5SIG
65936 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
65937 @@ -1594,6 +1597,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
65938 return 0;
65939
65940 reset:
65941 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65942 + if (!grsec_enable_blackhole)
65943 +#endif
65944 tcp_v4_send_reset(rsk, skb);
65945 discard:
65946 kfree_skb(skb);
65947 @@ -1656,12 +1662,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
65948 TCP_SKB_CB(skb)->sacked = 0;
65949
65950 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
65951 - if (!sk)
65952 + if (!sk) {
65953 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65954 + ret = 1;
65955 +#endif
65956 goto no_tcp_socket;
65957 -
65958 + }
65959 process:
65960 - if (sk->sk_state == TCP_TIME_WAIT)
65961 + if (sk->sk_state == TCP_TIME_WAIT) {
65962 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65963 + ret = 2;
65964 +#endif
65965 goto do_time_wait;
65966 + }
65967
65968 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
65969 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
65970 @@ -1711,6 +1724,10 @@ no_tcp_socket:
65971 bad_packet:
65972 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
65973 } else {
65974 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65975 + if (!grsec_enable_blackhole || (ret == 1 &&
65976 + (skb->dev->flags & IFF_LOOPBACK)))
65977 +#endif
65978 tcp_v4_send_reset(NULL, skb);
65979 }
65980
65981 @@ -2374,7 +2391,11 @@ static void get_openreq4(struct sock *sk
65982 0, /* non standard timer */
65983 0, /* open_requests have no inode */
65984 atomic_read(&sk->sk_refcnt),
65985 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65986 + NULL,
65987 +#else
65988 req,
65989 +#endif
65990 len);
65991 }
65992
65993 @@ -2424,7 +2445,12 @@ static void get_tcp4_sock(struct sock *s
65994 sock_i_uid(sk),
65995 icsk->icsk_probes_out,
65996 sock_i_ino(sk),
65997 - atomic_read(&sk->sk_refcnt), sk,
65998 + atomic_read(&sk->sk_refcnt),
65999 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66000 + NULL,
66001 +#else
66002 + sk,
66003 +#endif
66004 jiffies_to_clock_t(icsk->icsk_rto),
66005 jiffies_to_clock_t(icsk->icsk_ack.ato),
66006 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
66007 @@ -2452,7 +2478,13 @@ static void get_timewait4_sock(struct in
66008 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
66009 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
66010 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66011 - atomic_read(&tw->tw_refcnt), tw, len);
66012 + atomic_read(&tw->tw_refcnt),
66013 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66014 + NULL,
66015 +#else
66016 + tw,
66017 +#endif
66018 + len);
66019 }
66020
66021 #define TMPSZ 150
66022 diff -urNp linux-2.6.39.4/net/ipv4/tcp_minisocks.c linux-2.6.39.4/net/ipv4/tcp_minisocks.c
66023 --- linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-05-19 00:06:34.000000000 -0400
66024 +++ linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-08-05 19:44:37.000000000 -0400
66025 @@ -27,6 +27,10 @@
66026 #include <net/inet_common.h>
66027 #include <net/xfrm.h>
66028
66029 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66030 +extern int grsec_enable_blackhole;
66031 +#endif
66032 +
66033 int sysctl_tcp_syncookies __read_mostly = 1;
66034 EXPORT_SYMBOL(sysctl_tcp_syncookies);
66035
66036 @@ -745,6 +749,10 @@ listen_overflow:
66037
66038 embryonic_reset:
66039 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
66040 +
66041 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66042 + if (!grsec_enable_blackhole)
66043 +#endif
66044 if (!(flg & TCP_FLAG_RST))
66045 req->rsk_ops->send_reset(sk, skb);
66046
66047 diff -urNp linux-2.6.39.4/net/ipv4/tcp_output.c linux-2.6.39.4/net/ipv4/tcp_output.c
66048 --- linux-2.6.39.4/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
66049 +++ linux-2.6.39.4/net/ipv4/tcp_output.c 2011-08-05 19:44:37.000000000 -0400
66050 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
66051 int mss;
66052 int s_data_desired = 0;
66053
66054 + pax_track_stack();
66055 +
66056 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
66057 s_data_desired = cvp->s_data_desired;
66058 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
66059 diff -urNp linux-2.6.39.4/net/ipv4/tcp_probe.c linux-2.6.39.4/net/ipv4/tcp_probe.c
66060 --- linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-05-19 00:06:34.000000000 -0400
66061 +++ linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-08-05 19:44:37.000000000 -0400
66062 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
66063 if (cnt + width >= len)
66064 break;
66065
66066 - if (copy_to_user(buf + cnt, tbuf, width))
66067 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
66068 return -EFAULT;
66069 cnt += width;
66070 }
66071 diff -urNp linux-2.6.39.4/net/ipv4/tcp_timer.c linux-2.6.39.4/net/ipv4/tcp_timer.c
66072 --- linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-05-19 00:06:34.000000000 -0400
66073 +++ linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-08-05 19:44:37.000000000 -0400
66074 @@ -22,6 +22,10 @@
66075 #include <linux/gfp.h>
66076 #include <net/tcp.h>
66077
66078 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66079 +extern int grsec_lastack_retries;
66080 +#endif
66081 +
66082 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
66083 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
66084 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
66085 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
66086 }
66087 }
66088
66089 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66090 + if ((sk->sk_state == TCP_LAST_ACK) &&
66091 + (grsec_lastack_retries > 0) &&
66092 + (grsec_lastack_retries < retry_until))
66093 + retry_until = grsec_lastack_retries;
66094 +#endif
66095 +
66096 if (retransmits_timed_out(sk, retry_until,
66097 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
66098 /* Has it gone just too far? */
66099 diff -urNp linux-2.6.39.4/net/ipv4/udp.c linux-2.6.39.4/net/ipv4/udp.c
66100 --- linux-2.6.39.4/net/ipv4/udp.c 2011-07-09 09:18:51.000000000 -0400
66101 +++ linux-2.6.39.4/net/ipv4/udp.c 2011-08-05 19:44:37.000000000 -0400
66102 @@ -86,6 +86,7 @@
66103 #include <linux/types.h>
66104 #include <linux/fcntl.h>
66105 #include <linux/module.h>
66106 +#include <linux/security.h>
66107 #include <linux/socket.h>
66108 #include <linux/sockios.h>
66109 #include <linux/igmp.h>
66110 @@ -107,6 +108,10 @@
66111 #include <net/xfrm.h>
66112 #include "udp_impl.h"
66113
66114 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66115 +extern int grsec_enable_blackhole;
66116 +#endif
66117 +
66118 struct udp_table udp_table __read_mostly;
66119 EXPORT_SYMBOL(udp_table);
66120
66121 @@ -564,6 +569,9 @@ found:
66122 return s;
66123 }
66124
66125 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
66126 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
66127 +
66128 /*
66129 * This routine is called by the ICMP module when it gets some
66130 * sort of error condition. If err < 0 then the socket should
66131 @@ -853,9 +861,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
66132 dport = usin->sin_port;
66133 if (dport == 0)
66134 return -EINVAL;
66135 +
66136 + err = gr_search_udp_sendmsg(sk, usin);
66137 + if (err)
66138 + return err;
66139 } else {
66140 if (sk->sk_state != TCP_ESTABLISHED)
66141 return -EDESTADDRREQ;
66142 +
66143 + err = gr_search_udp_sendmsg(sk, NULL);
66144 + if (err)
66145 + return err;
66146 +
66147 daddr = inet->inet_daddr;
66148 dport = inet->inet_dport;
66149 /* Open fast path for connected socket.
66150 @@ -1090,7 +1107,7 @@ static unsigned int first_packet_length(
66151 udp_lib_checksum_complete(skb)) {
66152 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66153 IS_UDPLITE(sk));
66154 - atomic_inc(&sk->sk_drops);
66155 + atomic_inc_unchecked(&sk->sk_drops);
66156 __skb_unlink(skb, rcvq);
66157 __skb_queue_tail(&list_kill, skb);
66158 }
66159 @@ -1176,6 +1193,10 @@ try_again:
66160 if (!skb)
66161 goto out;
66162
66163 + err = gr_search_udp_recvmsg(sk, skb);
66164 + if (err)
66165 + goto out_free;
66166 +
66167 ulen = skb->len - sizeof(struct udphdr);
66168 if (len > ulen)
66169 len = ulen;
66170 @@ -1475,7 +1496,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
66171
66172 drop:
66173 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66174 - atomic_inc(&sk->sk_drops);
66175 + atomic_inc_unchecked(&sk->sk_drops);
66176 kfree_skb(skb);
66177 return -1;
66178 }
66179 @@ -1494,7 +1515,7 @@ static void flush_stack(struct sock **st
66180 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
66181
66182 if (!skb1) {
66183 - atomic_inc(&sk->sk_drops);
66184 + atomic_inc_unchecked(&sk->sk_drops);
66185 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
66186 IS_UDPLITE(sk));
66187 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66188 @@ -1663,6 +1684,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
66189 goto csum_error;
66190
66191 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
66192 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66193 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66194 +#endif
66195 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
66196
66197 /*
66198 @@ -2090,8 +2114,13 @@ static void udp4_format_sock(struct sock
66199 sk_wmem_alloc_get(sp),
66200 sk_rmem_alloc_get(sp),
66201 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
66202 - atomic_read(&sp->sk_refcnt), sp,
66203 - atomic_read(&sp->sk_drops), len);
66204 + atomic_read(&sp->sk_refcnt),
66205 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66206 + NULL,
66207 +#else
66208 + sp,
66209 +#endif
66210 + atomic_read_unchecked(&sp->sk_drops), len);
66211 }
66212
66213 int udp4_seq_show(struct seq_file *seq, void *v)
66214 diff -urNp linux-2.6.39.4/net/ipv6/inet6_connection_sock.c linux-2.6.39.4/net/ipv6/inet6_connection_sock.c
66215 --- linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-05-19 00:06:34.000000000 -0400
66216 +++ linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-08-05 19:44:37.000000000 -0400
66217 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
66218 #ifdef CONFIG_XFRM
66219 {
66220 struct rt6_info *rt = (struct rt6_info *)dst;
66221 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
66222 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
66223 }
66224 #endif
66225 }
66226 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
66227 #ifdef CONFIG_XFRM
66228 if (dst) {
66229 struct rt6_info *rt = (struct rt6_info *)dst;
66230 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
66231 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
66232 __sk_dst_reset(sk);
66233 dst = NULL;
66234 }
66235 diff -urNp linux-2.6.39.4/net/ipv6/ipv6_sockglue.c linux-2.6.39.4/net/ipv6/ipv6_sockglue.c
66236 --- linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-05-19 00:06:34.000000000 -0400
66237 +++ linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-08-05 19:44:37.000000000 -0400
66238 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
66239 int val, valbool;
66240 int retv = -ENOPROTOOPT;
66241
66242 + pax_track_stack();
66243 +
66244 if (optval == NULL)
66245 val=0;
66246 else {
66247 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
66248 int len;
66249 int val;
66250
66251 + pax_track_stack();
66252 +
66253 if (ip6_mroute_opt(optname))
66254 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
66255
66256 diff -urNp linux-2.6.39.4/net/ipv6/raw.c linux-2.6.39.4/net/ipv6/raw.c
66257 --- linux-2.6.39.4/net/ipv6/raw.c 2011-05-19 00:06:34.000000000 -0400
66258 +++ linux-2.6.39.4/net/ipv6/raw.c 2011-08-05 19:44:37.000000000 -0400
66259 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
66260 {
66261 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
66262 skb_checksum_complete(skb)) {
66263 - atomic_inc(&sk->sk_drops);
66264 + atomic_inc_unchecked(&sk->sk_drops);
66265 kfree_skb(skb);
66266 return NET_RX_DROP;
66267 }
66268 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66269 struct raw6_sock *rp = raw6_sk(sk);
66270
66271 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
66272 - atomic_inc(&sk->sk_drops);
66273 + atomic_inc_unchecked(&sk->sk_drops);
66274 kfree_skb(skb);
66275 return NET_RX_DROP;
66276 }
66277 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66278
66279 if (inet->hdrincl) {
66280 if (skb_checksum_complete(skb)) {
66281 - atomic_inc(&sk->sk_drops);
66282 + atomic_inc_unchecked(&sk->sk_drops);
66283 kfree_skb(skb);
66284 return NET_RX_DROP;
66285 }
66286 @@ -601,7 +601,7 @@ out:
66287 return err;
66288 }
66289
66290 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
66291 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66292 struct flowi6 *fl6, struct dst_entry **dstp,
66293 unsigned int flags)
66294 {
66295 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66296 u16 proto;
66297 int err;
66298
66299 + pax_track_stack();
66300 +
66301 /* Rough check on arithmetic overflow,
66302 better check is made in ip6_append_data().
66303 */
66304 @@ -909,12 +911,15 @@ do_confirm:
66305 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66306 char __user *optval, int optlen)
66307 {
66308 + struct icmp6_filter filter;
66309 +
66310 switch (optname) {
66311 case ICMPV6_FILTER:
66312 if (optlen > sizeof(struct icmp6_filter))
66313 optlen = sizeof(struct icmp6_filter);
66314 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66315 + if (copy_from_user(&filter, optval, optlen))
66316 return -EFAULT;
66317 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
66318 return 0;
66319 default:
66320 return -ENOPROTOOPT;
66321 @@ -926,6 +931,7 @@ static int rawv6_seticmpfilter(struct so
66322 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
66323 char __user *optval, int __user *optlen)
66324 {
66325 + struct icmp6_filter filter;
66326 int len;
66327
66328 switch (optname) {
66329 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66330 len = sizeof(struct icmp6_filter);
66331 if (put_user(len, optlen))
66332 return -EFAULT;
66333 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66334 + memcpy(&filter, &raw6_sk(sk)->filter, len);
66335 + if (copy_to_user(optval, &filter, len))
66336 return -EFAULT;
66337 return 0;
66338 default:
66339 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66340 0, 0L, 0,
66341 sock_i_uid(sp), 0,
66342 sock_i_ino(sp),
66343 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66344 + atomic_read(&sp->sk_refcnt),
66345 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66346 + NULL,
66347 +#else
66348 + sp,
66349 +#endif
66350 + atomic_read_unchecked(&sp->sk_drops));
66351 }
66352
66353 static int raw6_seq_show(struct seq_file *seq, void *v)
66354 diff -urNp linux-2.6.39.4/net/ipv6/tcp_ipv6.c linux-2.6.39.4/net/ipv6/tcp_ipv6.c
66355 --- linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-05-19 00:06:34.000000000 -0400
66356 +++ linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-08-05 19:44:37.000000000 -0400
66357 @@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66358 }
66359 #endif
66360
66361 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66362 +extern int grsec_enable_blackhole;
66363 +#endif
66364 +
66365 static void tcp_v6_hash(struct sock *sk)
66366 {
66367 if (sk->sk_state != TCP_CLOSE) {
66368 @@ -1660,6 +1664,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66369 return 0;
66370
66371 reset:
66372 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66373 + if (!grsec_enable_blackhole)
66374 +#endif
66375 tcp_v6_send_reset(sk, skb);
66376 discard:
66377 if (opt_skb)
66378 @@ -1739,12 +1746,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66379 TCP_SKB_CB(skb)->sacked = 0;
66380
66381 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66382 - if (!sk)
66383 + if (!sk) {
66384 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66385 + ret = 1;
66386 +#endif
66387 goto no_tcp_socket;
66388 + }
66389
66390 process:
66391 - if (sk->sk_state == TCP_TIME_WAIT)
66392 + if (sk->sk_state == TCP_TIME_WAIT) {
66393 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66394 + ret = 2;
66395 +#endif
66396 goto do_time_wait;
66397 + }
66398
66399 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66400 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66401 @@ -1792,6 +1807,10 @@ no_tcp_socket:
66402 bad_packet:
66403 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66404 } else {
66405 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66406 + if (!grsec_enable_blackhole || (ret == 1 &&
66407 + (skb->dev->flags & IFF_LOOPBACK)))
66408 +#endif
66409 tcp_v6_send_reset(NULL, skb);
66410 }
66411
66412 @@ -2052,7 +2071,13 @@ static void get_openreq6(struct seq_file
66413 uid,
66414 0, /* non standard timer */
66415 0, /* open_requests have no inode */
66416 - 0, req);
66417 + 0,
66418 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66419 + NULL
66420 +#else
66421 + req
66422 +#endif
66423 + );
66424 }
66425
66426 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66427 @@ -2102,7 +2127,12 @@ static void get_tcp6_sock(struct seq_fil
66428 sock_i_uid(sp),
66429 icsk->icsk_probes_out,
66430 sock_i_ino(sp),
66431 - atomic_read(&sp->sk_refcnt), sp,
66432 + atomic_read(&sp->sk_refcnt),
66433 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66434 + NULL,
66435 +#else
66436 + sp,
66437 +#endif
66438 jiffies_to_clock_t(icsk->icsk_rto),
66439 jiffies_to_clock_t(icsk->icsk_ack.ato),
66440 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66441 @@ -2137,7 +2167,13 @@ static void get_timewait6_sock(struct se
66442 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66443 tw->tw_substate, 0, 0,
66444 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66445 - atomic_read(&tw->tw_refcnt), tw);
66446 + atomic_read(&tw->tw_refcnt),
66447 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66448 + NULL
66449 +#else
66450 + tw
66451 +#endif
66452 + );
66453 }
66454
66455 static int tcp6_seq_show(struct seq_file *seq, void *v)
66456 diff -urNp linux-2.6.39.4/net/ipv6/udp.c linux-2.6.39.4/net/ipv6/udp.c
66457 --- linux-2.6.39.4/net/ipv6/udp.c 2011-07-09 09:18:51.000000000 -0400
66458 +++ linux-2.6.39.4/net/ipv6/udp.c 2011-08-05 19:44:37.000000000 -0400
66459 @@ -50,6 +50,10 @@
66460 #include <linux/seq_file.h>
66461 #include "udp_impl.h"
66462
66463 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66464 +extern int grsec_enable_blackhole;
66465 +#endif
66466 +
66467 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66468 {
66469 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66470 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66471
66472 return 0;
66473 drop:
66474 - atomic_inc(&sk->sk_drops);
66475 + atomic_inc_unchecked(&sk->sk_drops);
66476 drop_no_sk_drops_inc:
66477 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66478 kfree_skb(skb);
66479 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66480 continue;
66481 }
66482 drop:
66483 - atomic_inc(&sk->sk_drops);
66484 + atomic_inc_unchecked(&sk->sk_drops);
66485 UDP6_INC_STATS_BH(sock_net(sk),
66486 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66487 UDP6_INC_STATS_BH(sock_net(sk),
66488 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66489 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66490 proto == IPPROTO_UDPLITE);
66491
66492 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66493 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66494 +#endif
66495 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66496
66497 kfree_skb(skb);
66498 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66499 if (!sock_owned_by_user(sk))
66500 udpv6_queue_rcv_skb(sk, skb);
66501 else if (sk_add_backlog(sk, skb)) {
66502 - atomic_inc(&sk->sk_drops);
66503 + atomic_inc_unchecked(&sk->sk_drops);
66504 bh_unlock_sock(sk);
66505 sock_put(sk);
66506 goto discard;
66507 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66508 0, 0L, 0,
66509 sock_i_uid(sp), 0,
66510 sock_i_ino(sp),
66511 - atomic_read(&sp->sk_refcnt), sp,
66512 - atomic_read(&sp->sk_drops));
66513 + atomic_read(&sp->sk_refcnt),
66514 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66515 + NULL,
66516 +#else
66517 + sp,
66518 +#endif
66519 + atomic_read_unchecked(&sp->sk_drops));
66520 }
66521
66522 int udp6_seq_show(struct seq_file *seq, void *v)
66523 diff -urNp linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c
66524 --- linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-05-19 00:06:34.000000000 -0400
66525 +++ linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-08-05 19:44:37.000000000 -0400
66526 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
66527 add_wait_queue(&self->open_wait, &wait);
66528
66529 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66530 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66531 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66532
66533 /* As far as I can see, we protect open_count - Jean II */
66534 spin_lock_irqsave(&self->spinlock, flags);
66535 if (!tty_hung_up_p(filp)) {
66536 extra_count = 1;
66537 - self->open_count--;
66538 + local_dec(&self->open_count);
66539 }
66540 spin_unlock_irqrestore(&self->spinlock, flags);
66541 - self->blocked_open++;
66542 + local_inc(&self->blocked_open);
66543
66544 while (1) {
66545 if (tty->termios->c_cflag & CBAUD) {
66546 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
66547 }
66548
66549 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66550 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66551 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66552
66553 schedule();
66554 }
66555 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
66556 if (extra_count) {
66557 /* ++ is not atomic, so this should be protected - Jean II */
66558 spin_lock_irqsave(&self->spinlock, flags);
66559 - self->open_count++;
66560 + local_inc(&self->open_count);
66561 spin_unlock_irqrestore(&self->spinlock, flags);
66562 }
66563 - self->blocked_open--;
66564 + local_dec(&self->blocked_open);
66565
66566 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66567 - __FILE__,__LINE__, tty->driver->name, self->open_count);
66568 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66569
66570 if (!retval)
66571 self->flags |= ASYNC_NORMAL_ACTIVE;
66572 @@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
66573 }
66574 /* ++ is not atomic, so this should be protected - Jean II */
66575 spin_lock_irqsave(&self->spinlock, flags);
66576 - self->open_count++;
66577 + local_inc(&self->open_count);
66578
66579 tty->driver_data = self;
66580 self->tty = tty;
66581 spin_unlock_irqrestore(&self->spinlock, flags);
66582
66583 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66584 - self->line, self->open_count);
66585 + self->line, local_read(&self->open_count));
66586
66587 /* Not really used by us, but lets do it anyway */
66588 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66589 @@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
66590 return;
66591 }
66592
66593 - if ((tty->count == 1) && (self->open_count != 1)) {
66594 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66595 /*
66596 * Uh, oh. tty->count is 1, which means that the tty
66597 * structure will be freed. state->count should always
66598 @@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
66599 */
66600 IRDA_DEBUG(0, "%s(), bad serial port count; "
66601 "tty->count is 1, state->count is %d\n", __func__ ,
66602 - self->open_count);
66603 - self->open_count = 1;
66604 + local_read(&self->open_count));
66605 + local_set(&self->open_count, 1);
66606 }
66607
66608 - if (--self->open_count < 0) {
66609 + if (local_dec_return(&self->open_count) < 0) {
66610 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66611 - __func__, self->line, self->open_count);
66612 - self->open_count = 0;
66613 + __func__, self->line, local_read(&self->open_count));
66614 + local_set(&self->open_count, 0);
66615 }
66616 - if (self->open_count) {
66617 + if (local_read(&self->open_count)) {
66618 spin_unlock_irqrestore(&self->spinlock, flags);
66619
66620 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66621 @@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
66622 tty->closing = 0;
66623 self->tty = NULL;
66624
66625 - if (self->blocked_open) {
66626 + if (local_read(&self->blocked_open)) {
66627 if (self->close_delay)
66628 schedule_timeout_interruptible(self->close_delay);
66629 wake_up_interruptible(&self->open_wait);
66630 @@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
66631 spin_lock_irqsave(&self->spinlock, flags);
66632 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66633 self->tty = NULL;
66634 - self->open_count = 0;
66635 + local_set(&self->open_count, 0);
66636 spin_unlock_irqrestore(&self->spinlock, flags);
66637
66638 wake_up_interruptible(&self->open_wait);
66639 @@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
66640 seq_putc(m, '\n');
66641
66642 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66643 - seq_printf(m, "Open count: %d\n", self->open_count);
66644 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66645 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66646 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66647
66648 diff -urNp linux-2.6.39.4/net/iucv/af_iucv.c linux-2.6.39.4/net/iucv/af_iucv.c
66649 --- linux-2.6.39.4/net/iucv/af_iucv.c 2011-05-19 00:06:34.000000000 -0400
66650 +++ linux-2.6.39.4/net/iucv/af_iucv.c 2011-08-05 19:44:37.000000000 -0400
66651 @@ -653,10 +653,10 @@ static int iucv_sock_autobind(struct soc
66652
66653 write_lock_bh(&iucv_sk_list.lock);
66654
66655 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66656 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66657 while (__iucv_get_sock_by_name(name)) {
66658 sprintf(name, "%08x",
66659 - atomic_inc_return(&iucv_sk_list.autobind_name));
66660 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66661 }
66662
66663 write_unlock_bh(&iucv_sk_list.lock);
66664 diff -urNp linux-2.6.39.4/net/key/af_key.c linux-2.6.39.4/net/key/af_key.c
66665 --- linux-2.6.39.4/net/key/af_key.c 2011-05-19 00:06:34.000000000 -0400
66666 +++ linux-2.6.39.4/net/key/af_key.c 2011-08-05 19:44:37.000000000 -0400
66667 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66668 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66669 struct xfrm_kmaddress k;
66670
66671 + pax_track_stack();
66672 +
66673 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66674 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66675 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66676 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66677 static u32 get_acqseq(void)
66678 {
66679 u32 res;
66680 - static atomic_t acqseq;
66681 + static atomic_unchecked_t acqseq;
66682
66683 do {
66684 - res = atomic_inc_return(&acqseq);
66685 + res = atomic_inc_return_unchecked(&acqseq);
66686 } while (!res);
66687 return res;
66688 }
66689 @@ -3657,7 +3659,11 @@ static int pfkey_seq_show(struct seq_fil
66690 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
66691 else
66692 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
66693 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66694 + NULL,
66695 +#else
66696 s,
66697 +#endif
66698 atomic_read(&s->sk_refcnt),
66699 sk_rmem_alloc_get(s),
66700 sk_wmem_alloc_get(s),
66701 diff -urNp linux-2.6.39.4/net/lapb/lapb_iface.c linux-2.6.39.4/net/lapb/lapb_iface.c
66702 --- linux-2.6.39.4/net/lapb/lapb_iface.c 2011-05-19 00:06:34.000000000 -0400
66703 +++ linux-2.6.39.4/net/lapb/lapb_iface.c 2011-08-05 20:34:06.000000000 -0400
66704 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66705 goto out;
66706
66707 lapb->dev = dev;
66708 - lapb->callbacks = *callbacks;
66709 + lapb->callbacks = callbacks;
66710
66711 __lapb_insert_cb(lapb);
66712
66713 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66714
66715 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66716 {
66717 - if (lapb->callbacks.connect_confirmation)
66718 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66719 + if (lapb->callbacks->connect_confirmation)
66720 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66721 }
66722
66723 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66724 {
66725 - if (lapb->callbacks.connect_indication)
66726 - lapb->callbacks.connect_indication(lapb->dev, reason);
66727 + if (lapb->callbacks->connect_indication)
66728 + lapb->callbacks->connect_indication(lapb->dev, reason);
66729 }
66730
66731 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66732 {
66733 - if (lapb->callbacks.disconnect_confirmation)
66734 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66735 + if (lapb->callbacks->disconnect_confirmation)
66736 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66737 }
66738
66739 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66740 {
66741 - if (lapb->callbacks.disconnect_indication)
66742 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66743 + if (lapb->callbacks->disconnect_indication)
66744 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66745 }
66746
66747 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66748 {
66749 - if (lapb->callbacks.data_indication)
66750 - return lapb->callbacks.data_indication(lapb->dev, skb);
66751 + if (lapb->callbacks->data_indication)
66752 + return lapb->callbacks->data_indication(lapb->dev, skb);
66753
66754 kfree_skb(skb);
66755 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66756 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66757 {
66758 int used = 0;
66759
66760 - if (lapb->callbacks.data_transmit) {
66761 - lapb->callbacks.data_transmit(lapb->dev, skb);
66762 + if (lapb->callbacks->data_transmit) {
66763 + lapb->callbacks->data_transmit(lapb->dev, skb);
66764 used = 1;
66765 }
66766
66767 diff -urNp linux-2.6.39.4/net/mac80211/debugfs_sta.c linux-2.6.39.4/net/mac80211/debugfs_sta.c
66768 --- linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-05-19 00:06:34.000000000 -0400
66769 +++ linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-08-05 19:44:37.000000000 -0400
66770 @@ -115,6 +115,8 @@ static ssize_t sta_agg_status_read(struc
66771 struct tid_ampdu_rx *tid_rx;
66772 struct tid_ampdu_tx *tid_tx;
66773
66774 + pax_track_stack();
66775 +
66776 rcu_read_lock();
66777
66778 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66779 @@ -215,6 +217,8 @@ static ssize_t sta_ht_capa_read(struct f
66780 struct sta_info *sta = file->private_data;
66781 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66782
66783 + pax_track_stack();
66784 +
66785 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66786 htc->ht_supported ? "" : "not ");
66787 if (htc->ht_supported) {
66788 diff -urNp linux-2.6.39.4/net/mac80211/ieee80211_i.h linux-2.6.39.4/net/mac80211/ieee80211_i.h
66789 --- linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-05-19 00:06:34.000000000 -0400
66790 +++ linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-08-05 19:44:37.000000000 -0400
66791 @@ -27,6 +27,7 @@
66792 #include <net/ieee80211_radiotap.h>
66793 #include <net/cfg80211.h>
66794 #include <net/mac80211.h>
66795 +#include <asm/local.h>
66796 #include "key.h"
66797 #include "sta_info.h"
66798
66799 @@ -714,7 +715,7 @@ struct ieee80211_local {
66800 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66801 spinlock_t queue_stop_reason_lock;
66802
66803 - int open_count;
66804 + local_t open_count;
66805 int monitors, cooked_mntrs;
66806 /* number of interfaces with corresponding FIF_ flags */
66807 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66808 diff -urNp linux-2.6.39.4/net/mac80211/iface.c linux-2.6.39.4/net/mac80211/iface.c
66809 --- linux-2.6.39.4/net/mac80211/iface.c 2011-05-19 00:06:34.000000000 -0400
66810 +++ linux-2.6.39.4/net/mac80211/iface.c 2011-08-05 19:44:37.000000000 -0400
66811 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66812 break;
66813 }
66814
66815 - if (local->open_count == 0) {
66816 + if (local_read(&local->open_count) == 0) {
66817 res = drv_start(local);
66818 if (res)
66819 goto err_del_bss;
66820 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66821 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66822
66823 if (!is_valid_ether_addr(dev->dev_addr)) {
66824 - if (!local->open_count)
66825 + if (!local_read(&local->open_count))
66826 drv_stop(local);
66827 return -EADDRNOTAVAIL;
66828 }
66829 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66830 mutex_unlock(&local->mtx);
66831
66832 if (coming_up)
66833 - local->open_count++;
66834 + local_inc(&local->open_count);
66835
66836 if (hw_reconf_flags) {
66837 ieee80211_hw_config(local, hw_reconf_flags);
66838 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66839 err_del_interface:
66840 drv_remove_interface(local, &sdata->vif);
66841 err_stop:
66842 - if (!local->open_count)
66843 + if (!local_read(&local->open_count))
66844 drv_stop(local);
66845 err_del_bss:
66846 sdata->bss = NULL;
66847 @@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
66848 }
66849
66850 if (going_down)
66851 - local->open_count--;
66852 + local_dec(&local->open_count);
66853
66854 switch (sdata->vif.type) {
66855 case NL80211_IFTYPE_AP_VLAN:
66856 @@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
66857
66858 ieee80211_recalc_ps(local, -1);
66859
66860 - if (local->open_count == 0) {
66861 + if (local_read(&local->open_count) == 0) {
66862 if (local->ops->napi_poll)
66863 napi_disable(&local->napi);
66864 ieee80211_clear_tx_pending(local);
66865 diff -urNp linux-2.6.39.4/net/mac80211/main.c linux-2.6.39.4/net/mac80211/main.c
66866 --- linux-2.6.39.4/net/mac80211/main.c 2011-05-19 00:06:34.000000000 -0400
66867 +++ linux-2.6.39.4/net/mac80211/main.c 2011-08-05 19:44:37.000000000 -0400
66868 @@ -215,7 +215,7 @@ int ieee80211_hw_config(struct ieee80211
66869 local->hw.conf.power_level = power;
66870 }
66871
66872 - if (changed && local->open_count) {
66873 + if (changed && local_read(&local->open_count)) {
66874 ret = drv_config(local, changed);
66875 /*
66876 * Goal:
66877 diff -urNp linux-2.6.39.4/net/mac80211/mlme.c linux-2.6.39.4/net/mac80211/mlme.c
66878 --- linux-2.6.39.4/net/mac80211/mlme.c 2011-06-03 00:04:14.000000000 -0400
66879 +++ linux-2.6.39.4/net/mac80211/mlme.c 2011-08-05 19:44:37.000000000 -0400
66880 @@ -1431,6 +1431,8 @@ static bool ieee80211_assoc_success(stru
66881 bool have_higher_than_11mbit = false;
66882 u16 ap_ht_cap_flags;
66883
66884 + pax_track_stack();
66885 +
66886 /* AssocResp and ReassocResp have identical structure */
66887
66888 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66889 diff -urNp linux-2.6.39.4/net/mac80211/pm.c linux-2.6.39.4/net/mac80211/pm.c
66890 --- linux-2.6.39.4/net/mac80211/pm.c 2011-05-19 00:06:34.000000000 -0400
66891 +++ linux-2.6.39.4/net/mac80211/pm.c 2011-08-05 19:44:37.000000000 -0400
66892 @@ -95,7 +95,7 @@ int __ieee80211_suspend(struct ieee80211
66893 }
66894
66895 /* stop hardware - this must stop RX */
66896 - if (local->open_count)
66897 + if (local_read(&local->open_count))
66898 ieee80211_stop_device(local);
66899
66900 local->suspended = true;
66901 diff -urNp linux-2.6.39.4/net/mac80211/rate.c linux-2.6.39.4/net/mac80211/rate.c
66902 --- linux-2.6.39.4/net/mac80211/rate.c 2011-05-19 00:06:34.000000000 -0400
66903 +++ linux-2.6.39.4/net/mac80211/rate.c 2011-08-05 19:44:37.000000000 -0400
66904 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66905
66906 ASSERT_RTNL();
66907
66908 - if (local->open_count)
66909 + if (local_read(&local->open_count))
66910 return -EBUSY;
66911
66912 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
66913 diff -urNp linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c
66914 --- linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-05-19 00:06:34.000000000 -0400
66915 +++ linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-05 19:44:37.000000000 -0400
66916 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
66917
66918 spin_unlock_irqrestore(&events->lock, status);
66919
66920 - if (copy_to_user(buf, pb, p))
66921 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
66922 return -EFAULT;
66923
66924 return p;
66925 diff -urNp linux-2.6.39.4/net/mac80211/util.c linux-2.6.39.4/net/mac80211/util.c
66926 --- linux-2.6.39.4/net/mac80211/util.c 2011-05-19 00:06:34.000000000 -0400
66927 +++ linux-2.6.39.4/net/mac80211/util.c 2011-08-05 19:44:37.000000000 -0400
66928 @@ -1129,7 +1129,7 @@ int ieee80211_reconfig(struct ieee80211_
66929 local->resuming = true;
66930
66931 /* restart hardware */
66932 - if (local->open_count) {
66933 + if (local_read(&local->open_count)) {
66934 /*
66935 * Upon resume hardware can sometimes be goofy due to
66936 * various platform / driver / bus issues, so restarting
66937 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c
66938 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:18:51.000000000 -0400
66939 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-05 19:44:37.000000000 -0400
66940 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
66941 /* Increase the refcnt counter of the dest */
66942 atomic_inc(&dest->refcnt);
66943
66944 - conn_flags = atomic_read(&dest->conn_flags);
66945 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
66946 if (cp->protocol != IPPROTO_UDP)
66947 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
66948 /* Bind with the destination and its corresponding transmitter */
66949 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
66950 atomic_set(&cp->refcnt, 1);
66951
66952 atomic_set(&cp->n_control, 0);
66953 - atomic_set(&cp->in_pkts, 0);
66954 + atomic_set_unchecked(&cp->in_pkts, 0);
66955
66956 atomic_inc(&ipvs->conn_count);
66957 if (flags & IP_VS_CONN_F_NO_CPORT)
66958 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
66959
66960 /* Don't drop the entry if its number of incoming packets is not
66961 located in [0, 8] */
66962 - i = atomic_read(&cp->in_pkts);
66963 + i = atomic_read_unchecked(&cp->in_pkts);
66964 if (i > 8 || i < 0) return 0;
66965
66966 if (!todrop_rate[i]) return 0;
66967 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c
66968 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:18:51.000000000 -0400
66969 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-05 19:44:37.000000000 -0400
66970 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
66971 ret = cp->packet_xmit(skb, cp, pd->pp);
66972 /* do not touch skb anymore */
66973
66974 - atomic_inc(&cp->in_pkts);
66975 + atomic_inc_unchecked(&cp->in_pkts);
66976 ip_vs_conn_put(cp);
66977 return ret;
66978 }
66979 @@ -1633,7 +1633,7 @@ ip_vs_in(unsigned int hooknum, struct sk
66980 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
66981 pkts = sysctl_sync_threshold(ipvs);
66982 else
66983 - pkts = atomic_add_return(1, &cp->in_pkts);
66984 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
66985
66986 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
66987 cp->protocol == IPPROTO_SCTP) {
66988 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c
66989 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-19 00:06:34.000000000 -0400
66990 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-05 19:44:37.000000000 -0400
66991 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
66992 ip_vs_rs_hash(ipvs, dest);
66993 write_unlock_bh(&ipvs->rs_lock);
66994 }
66995 - atomic_set(&dest->conn_flags, conn_flags);
66996 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
66997
66998 /* bind the service */
66999 if (!dest->svc) {
67000 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
67001 " %-7s %-6d %-10d %-10d\n",
67002 &dest->addr.in6,
67003 ntohs(dest->port),
67004 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67005 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67006 atomic_read(&dest->weight),
67007 atomic_read(&dest->activeconns),
67008 atomic_read(&dest->inactconns));
67009 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
67010 "%-7s %-6d %-10d %-10d\n",
67011 ntohl(dest->addr.ip),
67012 ntohs(dest->port),
67013 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67014 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67015 atomic_read(&dest->weight),
67016 atomic_read(&dest->activeconns),
67017 atomic_read(&dest->inactconns));
67018 @@ -2287,6 +2287,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
67019 struct ip_vs_dest_user *udest_compat;
67020 struct ip_vs_dest_user_kern udest;
67021
67022 + pax_track_stack();
67023 +
67024 if (!capable(CAP_NET_ADMIN))
67025 return -EPERM;
67026
67027 @@ -2501,7 +2503,7 @@ __ip_vs_get_dest_entries(struct net *net
67028
67029 entry.addr = dest->addr.ip;
67030 entry.port = dest->port;
67031 - entry.conn_flags = atomic_read(&dest->conn_flags);
67032 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
67033 entry.weight = atomic_read(&dest->weight);
67034 entry.u_threshold = dest->u_threshold;
67035 entry.l_threshold = dest->l_threshold;
67036 @@ -3029,7 +3031,7 @@ static int ip_vs_genl_fill_dest(struct s
67037 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
67038
67039 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
67040 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67041 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67042 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
67043 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
67044 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
67045 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c
67046 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-05-19 00:06:34.000000000 -0400
67047 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-05 19:44:37.000000000 -0400
67048 @@ -648,7 +648,7 @@ control:
67049 * i.e only increment in_pkts for Templates.
67050 */
67051 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
67052 - int pkts = atomic_add_return(1, &cp->in_pkts);
67053 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67054
67055 if (pkts % sysctl_sync_period(ipvs) != 1)
67056 return;
67057 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
67058
67059 if (opt)
67060 memcpy(&cp->in_seq, opt, sizeof(*opt));
67061 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67062 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67063 cp->state = state;
67064 cp->old_state = cp->state;
67065 /*
67066 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c
67067 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-19 00:06:34.000000000 -0400
67068 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-05 19:44:37.000000000 -0400
67069 @@ -1127,7 +1127,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
67070 else
67071 rc = NF_ACCEPT;
67072 /* do not touch skb anymore */
67073 - atomic_inc(&cp->in_pkts);
67074 + atomic_inc_unchecked(&cp->in_pkts);
67075 goto out;
67076 }
67077
67078 @@ -1245,7 +1245,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
67079 else
67080 rc = NF_ACCEPT;
67081 /* do not touch skb anymore */
67082 - atomic_inc(&cp->in_pkts);
67083 + atomic_inc_unchecked(&cp->in_pkts);
67084 goto out;
67085 }
67086
67087 diff -urNp linux-2.6.39.4/net/netfilter/Kconfig linux-2.6.39.4/net/netfilter/Kconfig
67088 --- linux-2.6.39.4/net/netfilter/Kconfig 2011-05-19 00:06:34.000000000 -0400
67089 +++ linux-2.6.39.4/net/netfilter/Kconfig 2011-08-05 19:44:37.000000000 -0400
67090 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
67091
67092 To compile it as a module, choose M here. If unsure, say N.
67093
67094 +config NETFILTER_XT_MATCH_GRADM
67095 + tristate '"gradm" match support'
67096 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
67097 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
67098 + ---help---
67099 + The gradm match allows to match on grsecurity RBAC being enabled.
67100 + It is useful when iptables rules are applied early on bootup to
67101 + prevent connections to the machine (except from a trusted host)
67102 + while the RBAC system is disabled.
67103 +
67104 config NETFILTER_XT_MATCH_HASHLIMIT
67105 tristate '"hashlimit" match support'
67106 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
67107 diff -urNp linux-2.6.39.4/net/netfilter/Makefile linux-2.6.39.4/net/netfilter/Makefile
67108 --- linux-2.6.39.4/net/netfilter/Makefile 2011-05-19 00:06:34.000000000 -0400
67109 +++ linux-2.6.39.4/net/netfilter/Makefile 2011-08-05 19:44:37.000000000 -0400
67110 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
67111 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
67112 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
67113 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
67114 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
67115 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
67116 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
67117 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
67118 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_log.c linux-2.6.39.4/net/netfilter/nfnetlink_log.c
67119 --- linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-05-19 00:06:34.000000000 -0400
67120 +++ linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-08-05 19:44:37.000000000 -0400
67121 @@ -70,7 +70,7 @@ struct nfulnl_instance {
67122 };
67123
67124 static DEFINE_SPINLOCK(instances_lock);
67125 -static atomic_t global_seq;
67126 +static atomic_unchecked_t global_seq;
67127
67128 #define INSTANCE_BUCKETS 16
67129 static struct hlist_head instance_table[INSTANCE_BUCKETS];
67130 @@ -506,7 +506,7 @@ __build_packet_message(struct nfulnl_ins
67131 /* global sequence number */
67132 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
67133 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
67134 - htonl(atomic_inc_return(&global_seq)));
67135 + htonl(atomic_inc_return_unchecked(&global_seq)));
67136
67137 if (data_len) {
67138 struct nlattr *nla;
67139 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_queue.c linux-2.6.39.4/net/netfilter/nfnetlink_queue.c
67140 --- linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-05-19 00:06:34.000000000 -0400
67141 +++ linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-08-05 19:44:37.000000000 -0400
67142 @@ -58,7 +58,7 @@ struct nfqnl_instance {
67143 */
67144 spinlock_t lock;
67145 unsigned int queue_total;
67146 - atomic_t id_sequence; /* 'sequence' of pkt ids */
67147 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
67148 struct list_head queue_list; /* packets in queue */
67149 };
67150
67151 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
67152 nfmsg->version = NFNETLINK_V0;
67153 nfmsg->res_id = htons(queue->queue_num);
67154
67155 - entry->id = atomic_inc_return(&queue->id_sequence);
67156 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
67157 pmsg.packet_id = htonl(entry->id);
67158 pmsg.hw_protocol = entskb->protocol;
67159 pmsg.hook = entry->hook;
67160 @@ -869,7 +869,7 @@ static int seq_show(struct seq_file *s,
67161 inst->peer_pid, inst->queue_total,
67162 inst->copy_mode, inst->copy_range,
67163 inst->queue_dropped, inst->queue_user_dropped,
67164 - atomic_read(&inst->id_sequence), 1);
67165 + atomic_read_unchecked(&inst->id_sequence), 1);
67166 }
67167
67168 static const struct seq_operations nfqnl_seq_ops = {
67169 diff -urNp linux-2.6.39.4/net/netfilter/xt_gradm.c linux-2.6.39.4/net/netfilter/xt_gradm.c
67170 --- linux-2.6.39.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
67171 +++ linux-2.6.39.4/net/netfilter/xt_gradm.c 2011-08-05 19:44:37.000000000 -0400
67172 @@ -0,0 +1,51 @@
67173 +/*
67174 + * gradm match for netfilter
67175 + * Copyright © Zbigniew Krzystolik, 2010
67176 + *
67177 + * This program is free software; you can redistribute it and/or modify
67178 + * it under the terms of the GNU General Public License; either version
67179 + * 2 or 3 as published by the Free Software Foundation.
67180 + */
67181 +#include <linux/module.h>
67182 +#include <linux/moduleparam.h>
67183 +#include <linux/skbuff.h>
67184 +#include <linux/netfilter/x_tables.h>
67185 +#include <linux/grsecurity.h>
67186 +#include <linux/netfilter/xt_gradm.h>
67187 +
67188 +static bool
67189 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
67190 +{
67191 + const struct xt_gradm_mtinfo *info = par->matchinfo;
67192 + bool retval = false;
67193 + if (gr_acl_is_enabled())
67194 + retval = true;
67195 + return retval ^ info->invflags;
67196 +}
67197 +
67198 +static struct xt_match gradm_mt_reg __read_mostly = {
67199 + .name = "gradm",
67200 + .revision = 0,
67201 + .family = NFPROTO_UNSPEC,
67202 + .match = gradm_mt,
67203 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
67204 + .me = THIS_MODULE,
67205 +};
67206 +
67207 +static int __init gradm_mt_init(void)
67208 +{
67209 + return xt_register_match(&gradm_mt_reg);
67210 +}
67211 +
67212 +static void __exit gradm_mt_exit(void)
67213 +{
67214 + xt_unregister_match(&gradm_mt_reg);
67215 +}
67216 +
67217 +module_init(gradm_mt_init);
67218 +module_exit(gradm_mt_exit);
67219 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
67220 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
67221 +MODULE_LICENSE("GPL");
67222 +MODULE_ALIAS("ipt_gradm");
67223 +MODULE_ALIAS("ip6t_gradm");
67224 diff -urNp linux-2.6.39.4/net/netfilter/xt_statistic.c linux-2.6.39.4/net/netfilter/xt_statistic.c
67225 --- linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-05-19 00:06:34.000000000 -0400
67226 +++ linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-08-05 19:44:37.000000000 -0400
67227 @@ -18,7 +18,7 @@
67228 #include <linux/netfilter/x_tables.h>
67229
67230 struct xt_statistic_priv {
67231 - atomic_t count;
67232 + atomic_unchecked_t count;
67233 } ____cacheline_aligned_in_smp;
67234
67235 MODULE_LICENSE("GPL");
67236 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
67237 break;
67238 case XT_STATISTIC_MODE_NTH:
67239 do {
67240 - oval = atomic_read(&info->master->count);
67241 + oval = atomic_read_unchecked(&info->master->count);
67242 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
67243 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
67244 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
67245 if (nval == 0)
67246 ret = !ret;
67247 break;
67248 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
67249 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
67250 if (info->master == NULL)
67251 return -ENOMEM;
67252 - atomic_set(&info->master->count, info->u.nth.count);
67253 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
67254
67255 return 0;
67256 }
67257 diff -urNp linux-2.6.39.4/net/netlink/af_netlink.c linux-2.6.39.4/net/netlink/af_netlink.c
67258 --- linux-2.6.39.4/net/netlink/af_netlink.c 2011-05-19 00:06:34.000000000 -0400
67259 +++ linux-2.6.39.4/net/netlink/af_netlink.c 2011-08-05 19:44:37.000000000 -0400
67260 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
67261 sk->sk_error_report(sk);
67262 }
67263 }
67264 - atomic_inc(&sk->sk_drops);
67265 + atomic_inc_unchecked(&sk->sk_drops);
67266 }
67267
67268 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
67269 @@ -1992,15 +1992,23 @@ static int netlink_seq_show(struct seq_f
67270 struct netlink_sock *nlk = nlk_sk(s);
67271
67272 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
67273 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67274 + NULL,
67275 +#else
67276 s,
67277 +#endif
67278 s->sk_protocol,
67279 nlk->pid,
67280 nlk->groups ? (u32)nlk->groups[0] : 0,
67281 sk_rmem_alloc_get(s),
67282 sk_wmem_alloc_get(s),
67283 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67284 + NULL,
67285 +#else
67286 nlk->cb,
67287 +#endif
67288 atomic_read(&s->sk_refcnt),
67289 - atomic_read(&s->sk_drops),
67290 + atomic_read_unchecked(&s->sk_drops),
67291 sock_i_ino(s)
67292 );
67293
67294 diff -urNp linux-2.6.39.4/net/netrom/af_netrom.c linux-2.6.39.4/net/netrom/af_netrom.c
67295 --- linux-2.6.39.4/net/netrom/af_netrom.c 2011-05-19 00:06:34.000000000 -0400
67296 +++ linux-2.6.39.4/net/netrom/af_netrom.c 2011-08-05 19:44:37.000000000 -0400
67297 @@ -840,6 +840,7 @@ static int nr_getname(struct socket *soc
67298 struct sock *sk = sock->sk;
67299 struct nr_sock *nr = nr_sk(sk);
67300
67301 + memset(sax, 0, sizeof(*sax));
67302 lock_sock(sk);
67303 if (peer != 0) {
67304 if (sk->sk_state != TCP_ESTABLISHED) {
67305 @@ -854,7 +855,6 @@ static int nr_getname(struct socket *soc
67306 *uaddr_len = sizeof(struct full_sockaddr_ax25);
67307 } else {
67308 sax->fsa_ax25.sax25_family = AF_NETROM;
67309 - sax->fsa_ax25.sax25_ndigis = 0;
67310 sax->fsa_ax25.sax25_call = nr->source_addr;
67311 *uaddr_len = sizeof(struct sockaddr_ax25);
67312 }
67313 diff -urNp linux-2.6.39.4/net/packet/af_packet.c linux-2.6.39.4/net/packet/af_packet.c
67314 --- linux-2.6.39.4/net/packet/af_packet.c 2011-07-09 09:18:51.000000000 -0400
67315 +++ linux-2.6.39.4/net/packet/af_packet.c 2011-08-05 19:44:37.000000000 -0400
67316 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
67317
67318 spin_lock(&sk->sk_receive_queue.lock);
67319 po->stats.tp_packets++;
67320 - skb->dropcount = atomic_read(&sk->sk_drops);
67321 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67322 __skb_queue_tail(&sk->sk_receive_queue, skb);
67323 spin_unlock(&sk->sk_receive_queue.lock);
67324 sk->sk_data_ready(sk, skb->len);
67325 return 0;
67326
67327 drop_n_acct:
67328 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
67329 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
67330
67331 drop_n_restore:
67332 if (skb_head != skb->data && skb_shared(skb)) {
67333 @@ -2159,7 +2159,7 @@ static int packet_getsockopt(struct sock
67334 case PACKET_HDRLEN:
67335 if (len > sizeof(int))
67336 len = sizeof(int);
67337 - if (copy_from_user(&val, optval, len))
67338 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
67339 return -EFAULT;
67340 switch (val) {
67341 case TPACKET_V1:
67342 @@ -2197,7 +2197,7 @@ static int packet_getsockopt(struct sock
67343
67344 if (put_user(len, optlen))
67345 return -EFAULT;
67346 - if (copy_to_user(optval, data, len))
67347 + if (len > sizeof(st) || copy_to_user(optval, data, len))
67348 return -EFAULT;
67349 return 0;
67350 }
67351 @@ -2709,7 +2709,11 @@ static int packet_seq_show(struct seq_fi
67352
67353 seq_printf(seq,
67354 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
67355 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67356 + NULL,
67357 +#else
67358 s,
67359 +#endif
67360 atomic_read(&s->sk_refcnt),
67361 s->sk_type,
67362 ntohs(po->num),
67363 diff -urNp linux-2.6.39.4/net/phonet/af_phonet.c linux-2.6.39.4/net/phonet/af_phonet.c
67364 --- linux-2.6.39.4/net/phonet/af_phonet.c 2011-05-19 00:06:34.000000000 -0400
67365 +++ linux-2.6.39.4/net/phonet/af_phonet.c 2011-08-05 20:34:06.000000000 -0400
67366 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
67367 {
67368 struct phonet_protocol *pp;
67369
67370 - if (protocol >= PHONET_NPROTO)
67371 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67372 return NULL;
67373
67374 rcu_read_lock();
67375 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
67376 {
67377 int err = 0;
67378
67379 - if (protocol >= PHONET_NPROTO)
67380 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67381 return -EINVAL;
67382
67383 err = proto_register(pp->prot, 1);
67384 diff -urNp linux-2.6.39.4/net/phonet/pep.c linux-2.6.39.4/net/phonet/pep.c
67385 --- linux-2.6.39.4/net/phonet/pep.c 2011-05-19 00:06:34.000000000 -0400
67386 +++ linux-2.6.39.4/net/phonet/pep.c 2011-08-05 19:44:37.000000000 -0400
67387 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
67388
67389 case PNS_PEP_CTRL_REQ:
67390 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
67391 - atomic_inc(&sk->sk_drops);
67392 + atomic_inc_unchecked(&sk->sk_drops);
67393 break;
67394 }
67395 __skb_pull(skb, 4);
67396 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
67397 }
67398
67399 if (pn->rx_credits == 0) {
67400 - atomic_inc(&sk->sk_drops);
67401 + atomic_inc_unchecked(&sk->sk_drops);
67402 err = -ENOBUFS;
67403 break;
67404 }
67405 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
67406 }
67407
67408 if (pn->rx_credits == 0) {
67409 - atomic_inc(&sk->sk_drops);
67410 + atomic_inc_unchecked(&sk->sk_drops);
67411 err = NET_RX_DROP;
67412 break;
67413 }
67414 diff -urNp linux-2.6.39.4/net/phonet/socket.c linux-2.6.39.4/net/phonet/socket.c
67415 --- linux-2.6.39.4/net/phonet/socket.c 2011-05-19 00:06:34.000000000 -0400
67416 +++ linux-2.6.39.4/net/phonet/socket.c 2011-08-05 19:44:37.000000000 -0400
67417 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_f
67418 pn->resource, sk->sk_state,
67419 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
67420 sock_i_uid(sk), sock_i_ino(sk),
67421 - atomic_read(&sk->sk_refcnt), sk,
67422 - atomic_read(&sk->sk_drops), &len);
67423 + atomic_read(&sk->sk_refcnt),
67424 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67425 + NULL,
67426 +#else
67427 + sk,
67428 +#endif
67429 + atomic_read_unchecked(&sk->sk_drops), &len);
67430 }
67431 seq_printf(seq, "%*s\n", 127 - len, "");
67432 return 0;
67433 diff -urNp linux-2.6.39.4/net/rds/cong.c linux-2.6.39.4/net/rds/cong.c
67434 --- linux-2.6.39.4/net/rds/cong.c 2011-05-19 00:06:34.000000000 -0400
67435 +++ linux-2.6.39.4/net/rds/cong.c 2011-08-05 19:44:37.000000000 -0400
67436 @@ -77,7 +77,7 @@
67437 * finds that the saved generation number is smaller than the global generation
67438 * number, it wakes up the process.
67439 */
67440 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
67441 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
67442
67443 /*
67444 * Congestion monitoring
67445 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
67446 rdsdebug("waking map %p for %pI4\n",
67447 map, &map->m_addr);
67448 rds_stats_inc(s_cong_update_received);
67449 - atomic_inc(&rds_cong_generation);
67450 + atomic_inc_unchecked(&rds_cong_generation);
67451 if (waitqueue_active(&map->m_waitq))
67452 wake_up(&map->m_waitq);
67453 if (waitqueue_active(&rds_poll_waitq))
67454 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
67455
67456 int rds_cong_updated_since(unsigned long *recent)
67457 {
67458 - unsigned long gen = atomic_read(&rds_cong_generation);
67459 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
67460
67461 if (likely(*recent == gen))
67462 return 0;
67463 diff -urNp linux-2.6.39.4/net/rds/ib_cm.c linux-2.6.39.4/net/rds/ib_cm.c
67464 --- linux-2.6.39.4/net/rds/ib_cm.c 2011-05-19 00:06:34.000000000 -0400
67465 +++ linux-2.6.39.4/net/rds/ib_cm.c 2011-08-05 19:44:37.000000000 -0400
67466 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
67467 /* Clear the ACK state */
67468 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67469 #ifdef KERNEL_HAS_ATOMIC64
67470 - atomic64_set(&ic->i_ack_next, 0);
67471 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67472 #else
67473 ic->i_ack_next = 0;
67474 #endif
67475 diff -urNp linux-2.6.39.4/net/rds/ib.h linux-2.6.39.4/net/rds/ib.h
67476 --- linux-2.6.39.4/net/rds/ib.h 2011-05-19 00:06:34.000000000 -0400
67477 +++ linux-2.6.39.4/net/rds/ib.h 2011-08-05 19:44:37.000000000 -0400
67478 @@ -127,7 +127,7 @@ struct rds_ib_connection {
67479 /* sending acks */
67480 unsigned long i_ack_flags;
67481 #ifdef KERNEL_HAS_ATOMIC64
67482 - atomic64_t i_ack_next; /* next ACK to send */
67483 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67484 #else
67485 spinlock_t i_ack_lock; /* protect i_ack_next */
67486 u64 i_ack_next; /* next ACK to send */
67487 diff -urNp linux-2.6.39.4/net/rds/ib_recv.c linux-2.6.39.4/net/rds/ib_recv.c
67488 --- linux-2.6.39.4/net/rds/ib_recv.c 2011-05-19 00:06:34.000000000 -0400
67489 +++ linux-2.6.39.4/net/rds/ib_recv.c 2011-08-05 19:44:37.000000000 -0400
67490 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67491 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
67492 int ack_required)
67493 {
67494 - atomic64_set(&ic->i_ack_next, seq);
67495 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67496 if (ack_required) {
67497 smp_mb__before_clear_bit();
67498 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67499 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67500 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67501 smp_mb__after_clear_bit();
67502
67503 - return atomic64_read(&ic->i_ack_next);
67504 + return atomic64_read_unchecked(&ic->i_ack_next);
67505 }
67506 #endif
67507
67508 diff -urNp linux-2.6.39.4/net/rds/iw_cm.c linux-2.6.39.4/net/rds/iw_cm.c
67509 --- linux-2.6.39.4/net/rds/iw_cm.c 2011-05-19 00:06:34.000000000 -0400
67510 +++ linux-2.6.39.4/net/rds/iw_cm.c 2011-08-05 19:44:37.000000000 -0400
67511 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
67512 /* Clear the ACK state */
67513 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67514 #ifdef KERNEL_HAS_ATOMIC64
67515 - atomic64_set(&ic->i_ack_next, 0);
67516 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67517 #else
67518 ic->i_ack_next = 0;
67519 #endif
67520 diff -urNp linux-2.6.39.4/net/rds/iw.h linux-2.6.39.4/net/rds/iw.h
67521 --- linux-2.6.39.4/net/rds/iw.h 2011-05-19 00:06:34.000000000 -0400
67522 +++ linux-2.6.39.4/net/rds/iw.h 2011-08-05 19:44:37.000000000 -0400
67523 @@ -133,7 +133,7 @@ struct rds_iw_connection {
67524 /* sending acks */
67525 unsigned long i_ack_flags;
67526 #ifdef KERNEL_HAS_ATOMIC64
67527 - atomic64_t i_ack_next; /* next ACK to send */
67528 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67529 #else
67530 spinlock_t i_ack_lock; /* protect i_ack_next */
67531 u64 i_ack_next; /* next ACK to send */
67532 diff -urNp linux-2.6.39.4/net/rds/iw_rdma.c linux-2.6.39.4/net/rds/iw_rdma.c
67533 --- linux-2.6.39.4/net/rds/iw_rdma.c 2011-05-19 00:06:34.000000000 -0400
67534 +++ linux-2.6.39.4/net/rds/iw_rdma.c 2011-08-05 19:44:37.000000000 -0400
67535 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
67536 struct rdma_cm_id *pcm_id;
67537 int rc;
67538
67539 + pax_track_stack();
67540 +
67541 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
67542 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
67543
67544 diff -urNp linux-2.6.39.4/net/rds/iw_recv.c linux-2.6.39.4/net/rds/iw_recv.c
67545 --- linux-2.6.39.4/net/rds/iw_recv.c 2011-05-19 00:06:34.000000000 -0400
67546 +++ linux-2.6.39.4/net/rds/iw_recv.c 2011-08-05 19:44:37.000000000 -0400
67547 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67548 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
67549 int ack_required)
67550 {
67551 - atomic64_set(&ic->i_ack_next, seq);
67552 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67553 if (ack_required) {
67554 smp_mb__before_clear_bit();
67555 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67556 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67557 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67558 smp_mb__after_clear_bit();
67559
67560 - return atomic64_read(&ic->i_ack_next);
67561 + return atomic64_read_unchecked(&ic->i_ack_next);
67562 }
67563 #endif
67564
67565 diff -urNp linux-2.6.39.4/net/rxrpc/af_rxrpc.c linux-2.6.39.4/net/rxrpc/af_rxrpc.c
67566 --- linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-05-19 00:06:34.000000000 -0400
67567 +++ linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-08-05 19:44:37.000000000 -0400
67568 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
67569 __be32 rxrpc_epoch;
67570
67571 /* current debugging ID */
67572 -atomic_t rxrpc_debug_id;
67573 +atomic_unchecked_t rxrpc_debug_id;
67574
67575 /* count of skbs currently in use */
67576 atomic_t rxrpc_n_skbs;
67577 diff -urNp linux-2.6.39.4/net/rxrpc/ar-ack.c linux-2.6.39.4/net/rxrpc/ar-ack.c
67578 --- linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-05-19 00:06:34.000000000 -0400
67579 +++ linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-08-05 19:44:37.000000000 -0400
67580 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
67581
67582 _enter("{%d,%d,%d,%d},",
67583 call->acks_hard, call->acks_unacked,
67584 - atomic_read(&call->sequence),
67585 + atomic_read_unchecked(&call->sequence),
67586 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
67587
67588 stop = 0;
67589 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
67590
67591 /* each Tx packet has a new serial number */
67592 sp->hdr.serial =
67593 - htonl(atomic_inc_return(&call->conn->serial));
67594 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
67595
67596 hdr = (struct rxrpc_header *) txb->head;
67597 hdr->serial = sp->hdr.serial;
67598 @@ -405,7 +405,7 @@ static void rxrpc_rotate_tx_window(struc
67599 */
67600 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
67601 {
67602 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
67603 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
67604 }
67605
67606 /*
67607 @@ -631,7 +631,7 @@ process_further:
67608
67609 latest = ntohl(sp->hdr.serial);
67610 hard = ntohl(ack.firstPacket);
67611 - tx = atomic_read(&call->sequence);
67612 + tx = atomic_read_unchecked(&call->sequence);
67613
67614 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67615 latest,
67616 @@ -844,6 +844,8 @@ void rxrpc_process_call(struct work_stru
67617 u32 abort_code = RX_PROTOCOL_ERROR;
67618 u8 *acks = NULL;
67619
67620 + pax_track_stack();
67621 +
67622 //printk("\n--------------------\n");
67623 _enter("{%d,%s,%lx} [%lu]",
67624 call->debug_id, rxrpc_call_states[call->state], call->events,
67625 @@ -1163,7 +1165,7 @@ void rxrpc_process_call(struct work_stru
67626 goto maybe_reschedule;
67627
67628 send_ACK_with_skew:
67629 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67630 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67631 ntohl(ack.serial));
67632 send_ACK:
67633 mtu = call->conn->trans->peer->if_mtu;
67634 @@ -1175,7 +1177,7 @@ send_ACK:
67635 ackinfo.rxMTU = htonl(5692);
67636 ackinfo.jumbo_max = htonl(4);
67637
67638 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67639 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67640 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67641 ntohl(hdr.serial),
67642 ntohs(ack.maxSkew),
67643 @@ -1193,7 +1195,7 @@ send_ACK:
67644 send_message:
67645 _debug("send message");
67646
67647 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67648 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67649 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67650 send_message_2:
67651
67652 diff -urNp linux-2.6.39.4/net/rxrpc/ar-call.c linux-2.6.39.4/net/rxrpc/ar-call.c
67653 --- linux-2.6.39.4/net/rxrpc/ar-call.c 2011-05-19 00:06:34.000000000 -0400
67654 +++ linux-2.6.39.4/net/rxrpc/ar-call.c 2011-08-05 19:44:37.000000000 -0400
67655 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67656 spin_lock_init(&call->lock);
67657 rwlock_init(&call->state_lock);
67658 atomic_set(&call->usage, 1);
67659 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67660 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67661 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67662
67663 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67664 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connection.c linux-2.6.39.4/net/rxrpc/ar-connection.c
67665 --- linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-05-19 00:06:34.000000000 -0400
67666 +++ linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-08-05 19:44:37.000000000 -0400
67667 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67668 rwlock_init(&conn->lock);
67669 spin_lock_init(&conn->state_lock);
67670 atomic_set(&conn->usage, 1);
67671 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67672 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67673 conn->avail_calls = RXRPC_MAXCALLS;
67674 conn->size_align = 4;
67675 conn->header_size = sizeof(struct rxrpc_header);
67676 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connevent.c linux-2.6.39.4/net/rxrpc/ar-connevent.c
67677 --- linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-05-19 00:06:34.000000000 -0400
67678 +++ linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-08-05 19:44:37.000000000 -0400
67679 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67680
67681 len = iov[0].iov_len + iov[1].iov_len;
67682
67683 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67684 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67685 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67686
67687 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67688 diff -urNp linux-2.6.39.4/net/rxrpc/ar-input.c linux-2.6.39.4/net/rxrpc/ar-input.c
67689 --- linux-2.6.39.4/net/rxrpc/ar-input.c 2011-05-19 00:06:34.000000000 -0400
67690 +++ linux-2.6.39.4/net/rxrpc/ar-input.c 2011-08-05 19:44:37.000000000 -0400
67691 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67692 /* track the latest serial number on this connection for ACK packet
67693 * information */
67694 serial = ntohl(sp->hdr.serial);
67695 - hi_serial = atomic_read(&call->conn->hi_serial);
67696 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67697 while (serial > hi_serial)
67698 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67699 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67700 serial);
67701
67702 /* request ACK generation for any ACK or DATA packet that requests
67703 diff -urNp linux-2.6.39.4/net/rxrpc/ar-internal.h linux-2.6.39.4/net/rxrpc/ar-internal.h
67704 --- linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-05-19 00:06:34.000000000 -0400
67705 +++ linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-08-05 19:44:37.000000000 -0400
67706 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67707 int error; /* error code for local abort */
67708 int debug_id; /* debug ID for printks */
67709 unsigned call_counter; /* call ID counter */
67710 - atomic_t serial; /* packet serial number counter */
67711 - atomic_t hi_serial; /* highest serial number received */
67712 + atomic_unchecked_t serial; /* packet serial number counter */
67713 + atomic_unchecked_t hi_serial; /* highest serial number received */
67714 u8 avail_calls; /* number of calls available */
67715 u8 size_align; /* data size alignment (for security) */
67716 u8 header_size; /* rxrpc + security header size */
67717 @@ -346,7 +346,7 @@ struct rxrpc_call {
67718 spinlock_t lock;
67719 rwlock_t state_lock; /* lock for state transition */
67720 atomic_t usage;
67721 - atomic_t sequence; /* Tx data packet sequence counter */
67722 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67723 u32 abort_code; /* local/remote abort code */
67724 enum { /* current state of call */
67725 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67726 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67727 */
67728 extern atomic_t rxrpc_n_skbs;
67729 extern __be32 rxrpc_epoch;
67730 -extern atomic_t rxrpc_debug_id;
67731 +extern atomic_unchecked_t rxrpc_debug_id;
67732 extern struct workqueue_struct *rxrpc_workqueue;
67733
67734 /*
67735 diff -urNp linux-2.6.39.4/net/rxrpc/ar-local.c linux-2.6.39.4/net/rxrpc/ar-local.c
67736 --- linux-2.6.39.4/net/rxrpc/ar-local.c 2011-05-19 00:06:34.000000000 -0400
67737 +++ linux-2.6.39.4/net/rxrpc/ar-local.c 2011-08-05 19:44:37.000000000 -0400
67738 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67739 spin_lock_init(&local->lock);
67740 rwlock_init(&local->services_lock);
67741 atomic_set(&local->usage, 1);
67742 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67743 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67744 memcpy(&local->srx, srx, sizeof(*srx));
67745 }
67746
67747 diff -urNp linux-2.6.39.4/net/rxrpc/ar-output.c linux-2.6.39.4/net/rxrpc/ar-output.c
67748 --- linux-2.6.39.4/net/rxrpc/ar-output.c 2011-05-19 00:06:34.000000000 -0400
67749 +++ linux-2.6.39.4/net/rxrpc/ar-output.c 2011-08-05 19:44:37.000000000 -0400
67750 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67751 sp->hdr.cid = call->cid;
67752 sp->hdr.callNumber = call->call_id;
67753 sp->hdr.seq =
67754 - htonl(atomic_inc_return(&call->sequence));
67755 + htonl(atomic_inc_return_unchecked(&call->sequence));
67756 sp->hdr.serial =
67757 - htonl(atomic_inc_return(&conn->serial));
67758 + htonl(atomic_inc_return_unchecked(&conn->serial));
67759 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67760 sp->hdr.userStatus = 0;
67761 sp->hdr.securityIndex = conn->security_ix;
67762 diff -urNp linux-2.6.39.4/net/rxrpc/ar-peer.c linux-2.6.39.4/net/rxrpc/ar-peer.c
67763 --- linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-05-19 00:06:34.000000000 -0400
67764 +++ linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-08-05 19:44:37.000000000 -0400
67765 @@ -71,7 +71,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67766 INIT_LIST_HEAD(&peer->error_targets);
67767 spin_lock_init(&peer->lock);
67768 atomic_set(&peer->usage, 1);
67769 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67770 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67771 memcpy(&peer->srx, srx, sizeof(*srx));
67772
67773 rxrpc_assess_MTU_size(peer);
67774 diff -urNp linux-2.6.39.4/net/rxrpc/ar-proc.c linux-2.6.39.4/net/rxrpc/ar-proc.c
67775 --- linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-05-19 00:06:34.000000000 -0400
67776 +++ linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-08-05 19:44:37.000000000 -0400
67777 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67778 atomic_read(&conn->usage),
67779 rxrpc_conn_states[conn->state],
67780 key_serial(conn->key),
67781 - atomic_read(&conn->serial),
67782 - atomic_read(&conn->hi_serial));
67783 + atomic_read_unchecked(&conn->serial),
67784 + atomic_read_unchecked(&conn->hi_serial));
67785
67786 return 0;
67787 }
67788 diff -urNp linux-2.6.39.4/net/rxrpc/ar-transport.c linux-2.6.39.4/net/rxrpc/ar-transport.c
67789 --- linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-05-19 00:06:34.000000000 -0400
67790 +++ linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-08-05 19:44:37.000000000 -0400
67791 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67792 spin_lock_init(&trans->client_lock);
67793 rwlock_init(&trans->conn_lock);
67794 atomic_set(&trans->usage, 1);
67795 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67796 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67797
67798 if (peer->srx.transport.family == AF_INET) {
67799 switch (peer->srx.transport_type) {
67800 diff -urNp linux-2.6.39.4/net/rxrpc/rxkad.c linux-2.6.39.4/net/rxrpc/rxkad.c
67801 --- linux-2.6.39.4/net/rxrpc/rxkad.c 2011-05-19 00:06:34.000000000 -0400
67802 +++ linux-2.6.39.4/net/rxrpc/rxkad.c 2011-08-05 19:44:37.000000000 -0400
67803 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67804 u16 check;
67805 int nsg;
67806
67807 + pax_track_stack();
67808 +
67809 sp = rxrpc_skb(skb);
67810
67811 _enter("");
67812 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67813 u16 check;
67814 int nsg;
67815
67816 + pax_track_stack();
67817 +
67818 _enter("");
67819
67820 sp = rxrpc_skb(skb);
67821 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67822
67823 len = iov[0].iov_len + iov[1].iov_len;
67824
67825 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67826 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67827 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67828
67829 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67830 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67831
67832 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67833
67834 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67835 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67836 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67837
67838 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67839 diff -urNp linux-2.6.39.4/net/sctp/proc.c linux-2.6.39.4/net/sctp/proc.c
67840 --- linux-2.6.39.4/net/sctp/proc.c 2011-05-19 00:06:34.000000000 -0400
67841 +++ linux-2.6.39.4/net/sctp/proc.c 2011-08-05 19:44:37.000000000 -0400
67842 @@ -212,7 +212,12 @@ static int sctp_eps_seq_show(struct seq_
67843 sctp_for_each_hentry(epb, node, &head->chain) {
67844 ep = sctp_ep(epb);
67845 sk = epb->sk;
67846 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
67847 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
67848 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67849 + NULL, NULL,
67850 +#else
67851 + ep, sk,
67852 +#endif
67853 sctp_sk(sk)->type, sk->sk_state, hash,
67854 epb->bind_addr.port,
67855 sock_i_uid(sk), sock_i_ino(sk));
67856 @@ -318,7 +323,12 @@ static int sctp_assocs_seq_show(struct s
67857 seq_printf(seq,
67858 "%8p %8p %-3d %-3d %-2d %-4d "
67859 "%4d %8d %8d %7d %5lu %-5d %5d ",
67860 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67861 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67862 + NULL, NULL,
67863 +#else
67864 + assoc, sk,
67865 +#endif
67866 + sctp_sk(sk)->type, sk->sk_state,
67867 assoc->state, hash,
67868 assoc->assoc_id,
67869 assoc->sndbuf_used,
67870 diff -urNp linux-2.6.39.4/net/sctp/socket.c linux-2.6.39.4/net/sctp/socket.c
67871 --- linux-2.6.39.4/net/sctp/socket.c 2011-05-19 00:06:34.000000000 -0400
67872 +++ linux-2.6.39.4/net/sctp/socket.c 2011-08-05 19:44:37.000000000 -0400
67873 @@ -4433,7 +4433,7 @@ static int sctp_getsockopt_peer_addrs(st
67874 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67875 if (space_left < addrlen)
67876 return -ENOMEM;
67877 - if (copy_to_user(to, &temp, addrlen))
67878 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67879 return -EFAULT;
67880 to += addrlen;
67881 cnt++;
67882 diff -urNp linux-2.6.39.4/net/socket.c linux-2.6.39.4/net/socket.c
67883 --- linux-2.6.39.4/net/socket.c 2011-06-03 00:04:14.000000000 -0400
67884 +++ linux-2.6.39.4/net/socket.c 2011-08-05 19:44:37.000000000 -0400
67885 @@ -88,6 +88,7 @@
67886 #include <linux/nsproxy.h>
67887 #include <linux/magic.h>
67888 #include <linux/slab.h>
67889 +#include <linux/in.h>
67890
67891 #include <asm/uaccess.h>
67892 #include <asm/unistd.h>
67893 @@ -105,6 +106,8 @@
67894 #include <linux/sockios.h>
67895 #include <linux/atalk.h>
67896
67897 +#include <linux/grsock.h>
67898 +
67899 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67900 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67901 unsigned long nr_segs, loff_t pos);
67902 @@ -330,7 +333,7 @@ static struct dentry *sockfs_mount(struc
67903 &sockfs_dentry_operations, SOCKFS_MAGIC);
67904 }
67905
67906 -static struct vfsmount *sock_mnt __read_mostly;
67907 +struct vfsmount *sock_mnt __read_mostly;
67908
67909 static struct file_system_type sock_fs_type = {
67910 .name = "sockfs",
67911 @@ -1179,6 +1182,8 @@ int __sock_create(struct net *net, int f
67912 return -EAFNOSUPPORT;
67913 if (type < 0 || type >= SOCK_MAX)
67914 return -EINVAL;
67915 + if (protocol < 0)
67916 + return -EINVAL;
67917
67918 /* Compatibility.
67919
67920 @@ -1311,6 +1316,16 @@ SYSCALL_DEFINE3(socket, int, family, int
67921 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
67922 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
67923
67924 + if(!gr_search_socket(family, type, protocol)) {
67925 + retval = -EACCES;
67926 + goto out;
67927 + }
67928 +
67929 + if (gr_handle_sock_all(family, type, protocol)) {
67930 + retval = -EACCES;
67931 + goto out;
67932 + }
67933 +
67934 retval = sock_create(family, type, protocol, &sock);
67935 if (retval < 0)
67936 goto out;
67937 @@ -1423,6 +1438,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67938 if (sock) {
67939 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
67940 if (err >= 0) {
67941 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
67942 + err = -EACCES;
67943 + goto error;
67944 + }
67945 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
67946 + if (err)
67947 + goto error;
67948 +
67949 err = security_socket_bind(sock,
67950 (struct sockaddr *)&address,
67951 addrlen);
67952 @@ -1431,6 +1454,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
67953 (struct sockaddr *)
67954 &address, addrlen);
67955 }
67956 +error:
67957 fput_light(sock->file, fput_needed);
67958 }
67959 return err;
67960 @@ -1454,10 +1478,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
67961 if ((unsigned)backlog > somaxconn)
67962 backlog = somaxconn;
67963
67964 + if (gr_handle_sock_server_other(sock->sk)) {
67965 + err = -EPERM;
67966 + goto error;
67967 + }
67968 +
67969 + err = gr_search_listen(sock);
67970 + if (err)
67971 + goto error;
67972 +
67973 err = security_socket_listen(sock, backlog);
67974 if (!err)
67975 err = sock->ops->listen(sock, backlog);
67976
67977 +error:
67978 fput_light(sock->file, fput_needed);
67979 }
67980 return err;
67981 @@ -1501,6 +1535,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
67982 newsock->type = sock->type;
67983 newsock->ops = sock->ops;
67984
67985 + if (gr_handle_sock_server_other(sock->sk)) {
67986 + err = -EPERM;
67987 + sock_release(newsock);
67988 + goto out_put;
67989 + }
67990 +
67991 + err = gr_search_accept(sock);
67992 + if (err) {
67993 + sock_release(newsock);
67994 + goto out_put;
67995 + }
67996 +
67997 /*
67998 * We don't need try_module_get here, as the listening socket (sock)
67999 * has the protocol module (sock->ops->owner) held.
68000 @@ -1539,6 +1585,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68001 fd_install(newfd, newfile);
68002 err = newfd;
68003
68004 + gr_attach_curr_ip(newsock->sk);
68005 +
68006 out_put:
68007 fput_light(sock->file, fput_needed);
68008 out:
68009 @@ -1571,6 +1619,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68010 int, addrlen)
68011 {
68012 struct socket *sock;
68013 + struct sockaddr *sck;
68014 struct sockaddr_storage address;
68015 int err, fput_needed;
68016
68017 @@ -1581,6 +1630,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68018 if (err < 0)
68019 goto out_put;
68020
68021 + sck = (struct sockaddr *)&address;
68022 +
68023 + if (gr_handle_sock_client(sck)) {
68024 + err = -EACCES;
68025 + goto out_put;
68026 + }
68027 +
68028 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
68029 + if (err)
68030 + goto out_put;
68031 +
68032 err =
68033 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
68034 if (err)
68035 @@ -1882,6 +1942,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
68036 int err, ctl_len, iov_size, total_len;
68037 int fput_needed;
68038
68039 + pax_track_stack();
68040 +
68041 err = -EFAULT;
68042 if (MSG_CMSG_COMPAT & flags) {
68043 if (get_compat_msghdr(&msg_sys, msg_compat))
68044 diff -urNp linux-2.6.39.4/net/sunrpc/sched.c linux-2.6.39.4/net/sunrpc/sched.c
68045 --- linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:11:51.000000000 -0400
68046 +++ linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:12:20.000000000 -0400
68047 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
68048 #ifdef RPC_DEBUG
68049 static void rpc_task_set_debuginfo(struct rpc_task *task)
68050 {
68051 - static atomic_t rpc_pid;
68052 + static atomic_unchecked_t rpc_pid;
68053
68054 - task->tk_pid = atomic_inc_return(&rpc_pid);
68055 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
68056 }
68057 #else
68058 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
68059 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c
68060 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-19 00:06:34.000000000 -0400
68061 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-05 19:44:37.000000000 -0400
68062 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
68063 static unsigned int min_max_inline = 4096;
68064 static unsigned int max_max_inline = 65536;
68065
68066 -atomic_t rdma_stat_recv;
68067 -atomic_t rdma_stat_read;
68068 -atomic_t rdma_stat_write;
68069 -atomic_t rdma_stat_sq_starve;
68070 -atomic_t rdma_stat_rq_starve;
68071 -atomic_t rdma_stat_rq_poll;
68072 -atomic_t rdma_stat_rq_prod;
68073 -atomic_t rdma_stat_sq_poll;
68074 -atomic_t rdma_stat_sq_prod;
68075 +atomic_unchecked_t rdma_stat_recv;
68076 +atomic_unchecked_t rdma_stat_read;
68077 +atomic_unchecked_t rdma_stat_write;
68078 +atomic_unchecked_t rdma_stat_sq_starve;
68079 +atomic_unchecked_t rdma_stat_rq_starve;
68080 +atomic_unchecked_t rdma_stat_rq_poll;
68081 +atomic_unchecked_t rdma_stat_rq_prod;
68082 +atomic_unchecked_t rdma_stat_sq_poll;
68083 +atomic_unchecked_t rdma_stat_sq_prod;
68084
68085 /* Temporary NFS request map and context caches */
68086 struct kmem_cache *svc_rdma_map_cachep;
68087 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
68088 len -= *ppos;
68089 if (len > *lenp)
68090 len = *lenp;
68091 - if (len && copy_to_user(buffer, str_buf, len))
68092 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
68093 return -EFAULT;
68094 *lenp = len;
68095 *ppos += len;
68096 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
68097 {
68098 .procname = "rdma_stat_read",
68099 .data = &rdma_stat_read,
68100 - .maxlen = sizeof(atomic_t),
68101 + .maxlen = sizeof(atomic_unchecked_t),
68102 .mode = 0644,
68103 .proc_handler = read_reset_stat,
68104 },
68105 {
68106 .procname = "rdma_stat_recv",
68107 .data = &rdma_stat_recv,
68108 - .maxlen = sizeof(atomic_t),
68109 + .maxlen = sizeof(atomic_unchecked_t),
68110 .mode = 0644,
68111 .proc_handler = read_reset_stat,
68112 },
68113 {
68114 .procname = "rdma_stat_write",
68115 .data = &rdma_stat_write,
68116 - .maxlen = sizeof(atomic_t),
68117 + .maxlen = sizeof(atomic_unchecked_t),
68118 .mode = 0644,
68119 .proc_handler = read_reset_stat,
68120 },
68121 {
68122 .procname = "rdma_stat_sq_starve",
68123 .data = &rdma_stat_sq_starve,
68124 - .maxlen = sizeof(atomic_t),
68125 + .maxlen = sizeof(atomic_unchecked_t),
68126 .mode = 0644,
68127 .proc_handler = read_reset_stat,
68128 },
68129 {
68130 .procname = "rdma_stat_rq_starve",
68131 .data = &rdma_stat_rq_starve,
68132 - .maxlen = sizeof(atomic_t),
68133 + .maxlen = sizeof(atomic_unchecked_t),
68134 .mode = 0644,
68135 .proc_handler = read_reset_stat,
68136 },
68137 {
68138 .procname = "rdma_stat_rq_poll",
68139 .data = &rdma_stat_rq_poll,
68140 - .maxlen = sizeof(atomic_t),
68141 + .maxlen = sizeof(atomic_unchecked_t),
68142 .mode = 0644,
68143 .proc_handler = read_reset_stat,
68144 },
68145 {
68146 .procname = "rdma_stat_rq_prod",
68147 .data = &rdma_stat_rq_prod,
68148 - .maxlen = sizeof(atomic_t),
68149 + .maxlen = sizeof(atomic_unchecked_t),
68150 .mode = 0644,
68151 .proc_handler = read_reset_stat,
68152 },
68153 {
68154 .procname = "rdma_stat_sq_poll",
68155 .data = &rdma_stat_sq_poll,
68156 - .maxlen = sizeof(atomic_t),
68157 + .maxlen = sizeof(atomic_unchecked_t),
68158 .mode = 0644,
68159 .proc_handler = read_reset_stat,
68160 },
68161 {
68162 .procname = "rdma_stat_sq_prod",
68163 .data = &rdma_stat_sq_prod,
68164 - .maxlen = sizeof(atomic_t),
68165 + .maxlen = sizeof(atomic_unchecked_t),
68166 .mode = 0644,
68167 .proc_handler = read_reset_stat,
68168 },
68169 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
68170 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-19 00:06:34.000000000 -0400
68171 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-05 19:44:37.000000000 -0400
68172 @@ -499,7 +499,7 @@ next_sge:
68173 svc_rdma_put_context(ctxt, 0);
68174 goto out;
68175 }
68176 - atomic_inc(&rdma_stat_read);
68177 + atomic_inc_unchecked(&rdma_stat_read);
68178
68179 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
68180 chl_map->ch[ch_no].count -= read_wr.num_sge;
68181 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68182 dto_q);
68183 list_del_init(&ctxt->dto_q);
68184 } else {
68185 - atomic_inc(&rdma_stat_rq_starve);
68186 + atomic_inc_unchecked(&rdma_stat_rq_starve);
68187 clear_bit(XPT_DATA, &xprt->xpt_flags);
68188 ctxt = NULL;
68189 }
68190 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68191 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
68192 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
68193 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
68194 - atomic_inc(&rdma_stat_recv);
68195 + atomic_inc_unchecked(&rdma_stat_recv);
68196
68197 /* Build up the XDR from the receive buffers. */
68198 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
68199 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
68200 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-19 00:06:34.000000000 -0400
68201 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-05 19:44:37.000000000 -0400
68202 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
68203 write_wr.wr.rdma.remote_addr = to;
68204
68205 /* Post It */
68206 - atomic_inc(&rdma_stat_write);
68207 + atomic_inc_unchecked(&rdma_stat_write);
68208 if (svc_rdma_send(xprt, &write_wr))
68209 goto err;
68210 return 0;
68211 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
68212 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-19 00:06:34.000000000 -0400
68213 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-05 19:44:37.000000000 -0400
68214 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
68215 return;
68216
68217 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
68218 - atomic_inc(&rdma_stat_rq_poll);
68219 + atomic_inc_unchecked(&rdma_stat_rq_poll);
68220
68221 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
68222 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
68223 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
68224 }
68225
68226 if (ctxt)
68227 - atomic_inc(&rdma_stat_rq_prod);
68228 + atomic_inc_unchecked(&rdma_stat_rq_prod);
68229
68230 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
68231 /*
68232 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
68233 return;
68234
68235 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
68236 - atomic_inc(&rdma_stat_sq_poll);
68237 + atomic_inc_unchecked(&rdma_stat_sq_poll);
68238 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
68239 if (wc.status != IB_WC_SUCCESS)
68240 /* Close the transport */
68241 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
68242 }
68243
68244 if (ctxt)
68245 - atomic_inc(&rdma_stat_sq_prod);
68246 + atomic_inc_unchecked(&rdma_stat_sq_prod);
68247 }
68248
68249 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
68250 @@ -1271,7 +1271,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
68251 spin_lock_bh(&xprt->sc_lock);
68252 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
68253 spin_unlock_bh(&xprt->sc_lock);
68254 - atomic_inc(&rdma_stat_sq_starve);
68255 + atomic_inc_unchecked(&rdma_stat_sq_starve);
68256
68257 /* See if we can opportunistically reap SQ WR to make room */
68258 sq_cq_reap(xprt);
68259 diff -urNp linux-2.6.39.4/net/sysctl_net.c linux-2.6.39.4/net/sysctl_net.c
68260 --- linux-2.6.39.4/net/sysctl_net.c 2011-05-19 00:06:34.000000000 -0400
68261 +++ linux-2.6.39.4/net/sysctl_net.c 2011-08-05 19:44:37.000000000 -0400
68262 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
68263 struct ctl_table *table)
68264 {
68265 /* Allow network administrator to have same access as root. */
68266 - if (capable(CAP_NET_ADMIN)) {
68267 + if (capable_nolog(CAP_NET_ADMIN)) {
68268 int mode = (table->mode >> 6) & 7;
68269 return (mode << 6) | (mode << 3) | mode;
68270 }
68271 diff -urNp linux-2.6.39.4/net/unix/af_unix.c linux-2.6.39.4/net/unix/af_unix.c
68272 --- linux-2.6.39.4/net/unix/af_unix.c 2011-05-19 00:06:34.000000000 -0400
68273 +++ linux-2.6.39.4/net/unix/af_unix.c 2011-08-05 19:44:37.000000000 -0400
68274 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
68275 err = -ECONNREFUSED;
68276 if (!S_ISSOCK(inode->i_mode))
68277 goto put_fail;
68278 +
68279 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
68280 + err = -EACCES;
68281 + goto put_fail;
68282 + }
68283 +
68284 u = unix_find_socket_byinode(inode);
68285 if (!u)
68286 goto put_fail;
68287 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
68288 if (u) {
68289 struct dentry *dentry;
68290 dentry = unix_sk(u)->dentry;
68291 +
68292 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
68293 + err = -EPERM;
68294 + sock_put(u);
68295 + goto fail;
68296 + }
68297 +
68298 if (dentry)
68299 touch_atime(unix_sk(u)->mnt, dentry);
68300 } else
68301 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
68302 err = security_path_mknod(&nd.path, dentry, mode, 0);
68303 if (err)
68304 goto out_mknod_drop_write;
68305 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
68306 + err = -EACCES;
68307 + goto out_mknod_drop_write;
68308 + }
68309 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
68310 out_mknod_drop_write:
68311 mnt_drop_write(nd.path.mnt);
68312 if (err)
68313 goto out_mknod_dput;
68314 +
68315 + gr_handle_create(dentry, nd.path.mnt);
68316 +
68317 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
68318 dput(nd.path.dentry);
68319 nd.path.dentry = dentry;
68320 @@ -2255,7 +2275,11 @@ static int unix_seq_show(struct seq_file
68321 unix_state_lock(s);
68322
68323 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
68324 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68325 + NULL,
68326 +#else
68327 s,
68328 +#endif
68329 atomic_read(&s->sk_refcnt),
68330 0,
68331 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
68332 diff -urNp linux-2.6.39.4/net/wireless/core.h linux-2.6.39.4/net/wireless/core.h
68333 --- linux-2.6.39.4/net/wireless/core.h 2011-05-19 00:06:34.000000000 -0400
68334 +++ linux-2.6.39.4/net/wireless/core.h 2011-08-05 20:34:06.000000000 -0400
68335 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
68336 struct mutex mtx;
68337
68338 /* rfkill support */
68339 - struct rfkill_ops rfkill_ops;
68340 + rfkill_ops_no_const rfkill_ops;
68341 struct rfkill *rfkill;
68342 struct work_struct rfkill_sync;
68343
68344 diff -urNp linux-2.6.39.4/net/wireless/wext-core.c linux-2.6.39.4/net/wireless/wext-core.c
68345 --- linux-2.6.39.4/net/wireless/wext-core.c 2011-05-19 00:06:34.000000000 -0400
68346 +++ linux-2.6.39.4/net/wireless/wext-core.c 2011-08-05 19:44:37.000000000 -0400
68347 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
68348 */
68349
68350 /* Support for very large requests */
68351 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
68352 - (user_length > descr->max_tokens)) {
68353 + if (user_length > descr->max_tokens) {
68354 /* Allow userspace to GET more than max so
68355 * we can support any size GET requests.
68356 * There is still a limit : -ENOMEM.
68357 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
68358 }
68359 }
68360
68361 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
68362 - /*
68363 - * If this is a GET, but not NOMAX, it means that the extra
68364 - * data is not bounded by userspace, but by max_tokens. Thus
68365 - * set the length to max_tokens. This matches the extra data
68366 - * allocation.
68367 - * The driver should fill it with the number of tokens it
68368 - * provided, and it may check iwp->length rather than having
68369 - * knowledge of max_tokens. If the driver doesn't change the
68370 - * iwp->length, this ioctl just copies back max_token tokens
68371 - * filled with zeroes. Hopefully the driver isn't claiming
68372 - * them to be valid data.
68373 - */
68374 - iwp->length = descr->max_tokens;
68375 - }
68376 -
68377 err = handler(dev, info, (union iwreq_data *) iwp, extra);
68378
68379 iwp->length += essid_compat;
68380 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_policy.c linux-2.6.39.4/net/xfrm/xfrm_policy.c
68381 --- linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-05-19 00:06:34.000000000 -0400
68382 +++ linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-08-05 19:44:37.000000000 -0400
68383 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
68384 {
68385 policy->walk.dead = 1;
68386
68387 - atomic_inc(&policy->genid);
68388 + atomic_inc_unchecked(&policy->genid);
68389
68390 if (del_timer(&policy->timer))
68391 xfrm_pol_put(policy);
68392 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
68393 hlist_add_head(&policy->bydst, chain);
68394 xfrm_pol_hold(policy);
68395 net->xfrm.policy_count[dir]++;
68396 - atomic_inc(&flow_cache_genid);
68397 + atomic_inc_unchecked(&flow_cache_genid);
68398 if (delpol)
68399 __xfrm_policy_unlink(delpol, dir);
68400 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
68401 @@ -1527,7 +1527,7 @@ free_dst:
68402 goto out;
68403 }
68404
68405 -static int inline
68406 +static inline int
68407 xfrm_dst_alloc_copy(void **target, const void *src, int size)
68408 {
68409 if (!*target) {
68410 @@ -1539,7 +1539,7 @@ xfrm_dst_alloc_copy(void **target, const
68411 return 0;
68412 }
68413
68414 -static int inline
68415 +static inline int
68416 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
68417 {
68418 #ifdef CONFIG_XFRM_SUB_POLICY
68419 @@ -1551,7 +1551,7 @@ xfrm_dst_update_parent(struct dst_entry
68420 #endif
68421 }
68422
68423 -static int inline
68424 +static inline int
68425 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
68426 {
68427 #ifdef CONFIG_XFRM_SUB_POLICY
68428 @@ -1645,7 +1645,7 @@ xfrm_resolve_and_create_bundle(struct xf
68429
68430 xdst->num_pols = num_pols;
68431 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
68432 - xdst->policy_genid = atomic_read(&pols[0]->genid);
68433 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
68434
68435 return xdst;
68436 }
68437 @@ -2332,7 +2332,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
68438 if (xdst->xfrm_genid != dst->xfrm->genid)
68439 return 0;
68440 if (xdst->num_pols > 0 &&
68441 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
68442 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
68443 return 0;
68444
68445 mtu = dst_mtu(dst->child);
68446 @@ -2860,7 +2860,7 @@ static int xfrm_policy_migrate(struct xf
68447 sizeof(pol->xfrm_vec[i].saddr));
68448 pol->xfrm_vec[i].encap_family = mp->new_family;
68449 /* flush bundles */
68450 - atomic_inc(&pol->genid);
68451 + atomic_inc_unchecked(&pol->genid);
68452 }
68453 }
68454
68455 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_user.c linux-2.6.39.4/net/xfrm/xfrm_user.c
68456 --- linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-05-19 00:06:34.000000000 -0400
68457 +++ linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-08-05 19:44:37.000000000 -0400
68458 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
68459 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
68460 int i;
68461
68462 + pax_track_stack();
68463 +
68464 if (xp->xfrm_nr == 0)
68465 return 0;
68466
68467 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
68468 int err;
68469 int n = 0;
68470
68471 + pax_track_stack();
68472 +
68473 if (attrs[XFRMA_MIGRATE] == NULL)
68474 return -EINVAL;
68475
68476 diff -urNp linux-2.6.39.4/scripts/basic/fixdep.c linux-2.6.39.4/scripts/basic/fixdep.c
68477 --- linux-2.6.39.4/scripts/basic/fixdep.c 2011-05-19 00:06:34.000000000 -0400
68478 +++ linux-2.6.39.4/scripts/basic/fixdep.c 2011-08-05 19:44:37.000000000 -0400
68479 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
68480
68481 static void parse_config_file(const char *map, size_t len)
68482 {
68483 - const int *end = (const int *) (map + len);
68484 + const unsigned int *end = (const unsigned int *) (map + len);
68485 /* start at +1, so that p can never be < map */
68486 - const int *m = (const int *) map + 1;
68487 + const unsigned int *m = (const unsigned int *) map + 1;
68488 const char *p, *q;
68489
68490 for (; m < end; m++) {
68491 @@ -405,7 +405,7 @@ static void print_deps(void)
68492 static void traps(void)
68493 {
68494 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
68495 - int *p = (int *)test;
68496 + unsigned int *p = (unsigned int *)test;
68497
68498 if (*p != INT_CONF) {
68499 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
68500 diff -urNp linux-2.6.39.4/scripts/gcc-plugin.sh linux-2.6.39.4/scripts/gcc-plugin.sh
68501 --- linux-2.6.39.4/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
68502 +++ linux-2.6.39.4/scripts/gcc-plugin.sh 2011-08-05 20:34:06.000000000 -0400
68503 @@ -0,0 +1,3 @@
68504 +#!/bin/sh
68505 +
68506 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
68507 diff -urNp linux-2.6.39.4/scripts/Makefile.build linux-2.6.39.4/scripts/Makefile.build
68508 --- linux-2.6.39.4/scripts/Makefile.build 2011-05-19 00:06:34.000000000 -0400
68509 +++ linux-2.6.39.4/scripts/Makefile.build 2011-08-05 19:44:37.000000000 -0400
68510 @@ -93,7 +93,7 @@ endif
68511 endif
68512
68513 # Do not include host rules unless needed
68514 -ifneq ($(hostprogs-y)$(hostprogs-m),)
68515 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
68516 include scripts/Makefile.host
68517 endif
68518
68519 diff -urNp linux-2.6.39.4/scripts/Makefile.clean linux-2.6.39.4/scripts/Makefile.clean
68520 --- linux-2.6.39.4/scripts/Makefile.clean 2011-05-19 00:06:34.000000000 -0400
68521 +++ linux-2.6.39.4/scripts/Makefile.clean 2011-08-05 19:44:37.000000000 -0400
68522 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
68523 __clean-files := $(extra-y) $(always) \
68524 $(targets) $(clean-files) \
68525 $(host-progs) \
68526 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
68527 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
68528 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
68529
68530 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
68531
68532 diff -urNp linux-2.6.39.4/scripts/Makefile.host linux-2.6.39.4/scripts/Makefile.host
68533 --- linux-2.6.39.4/scripts/Makefile.host 2011-05-19 00:06:34.000000000 -0400
68534 +++ linux-2.6.39.4/scripts/Makefile.host 2011-08-05 19:44:37.000000000 -0400
68535 @@ -31,6 +31,7 @@
68536 # Note: Shared libraries consisting of C++ files are not supported
68537
68538 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
68539 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
68540
68541 # C code
68542 # Executables compiled from a single .c file
68543 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
68544 # Shared libaries (only .c supported)
68545 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
68546 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
68547 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
68548 # Remove .so files from "xxx-objs"
68549 host-cobjs := $(filter-out %.so,$(host-cobjs))
68550
68551 diff -urNp linux-2.6.39.4/scripts/mod/file2alias.c linux-2.6.39.4/scripts/mod/file2alias.c
68552 --- linux-2.6.39.4/scripts/mod/file2alias.c 2011-05-19 00:06:34.000000000 -0400
68553 +++ linux-2.6.39.4/scripts/mod/file2alias.c 2011-08-05 19:44:37.000000000 -0400
68554 @@ -72,7 +72,7 @@ static void device_id_check(const char *
68555 unsigned long size, unsigned long id_size,
68556 void *symval)
68557 {
68558 - int i;
68559 + unsigned int i;
68560
68561 if (size % id_size || size < id_size) {
68562 if (cross_build != 0)
68563 @@ -102,7 +102,7 @@ static void device_id_check(const char *
68564 /* USB is special because the bcdDevice can be matched against a numeric range */
68565 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
68566 static void do_usb_entry(struct usb_device_id *id,
68567 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
68568 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
68569 unsigned char range_lo, unsigned char range_hi,
68570 unsigned char max, struct module *mod)
68571 {
68572 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
68573 for (i = 0; i < count; i++) {
68574 const char *id = (char *)devs[i].id;
68575 char acpi_id[sizeof(devs[0].id)];
68576 - int j;
68577 + unsigned int j;
68578
68579 buf_printf(&mod->dev_table_buf,
68580 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68581 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
68582
68583 for (j = 0; j < PNP_MAX_DEVICES; j++) {
68584 const char *id = (char *)card->devs[j].id;
68585 - int i2, j2;
68586 + unsigned int i2, j2;
68587 int dup = 0;
68588
68589 if (!id[0])
68590 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
68591 /* add an individual alias for every device entry */
68592 if (!dup) {
68593 char acpi_id[sizeof(card->devs[0].id)];
68594 - int k;
68595 + unsigned int k;
68596
68597 buf_printf(&mod->dev_table_buf,
68598 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68599 @@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co
68600 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
68601 char *alias)
68602 {
68603 - int i, j;
68604 + unsigned int i, j;
68605
68606 sprintf(alias, "dmi*");
68607
68608 diff -urNp linux-2.6.39.4/scripts/mod/modpost.c linux-2.6.39.4/scripts/mod/modpost.c
68609 --- linux-2.6.39.4/scripts/mod/modpost.c 2011-05-19 00:06:34.000000000 -0400
68610 +++ linux-2.6.39.4/scripts/mod/modpost.c 2011-08-05 19:44:37.000000000 -0400
68611 @@ -896,6 +896,7 @@ enum mismatch {
68612 ANY_INIT_TO_ANY_EXIT,
68613 ANY_EXIT_TO_ANY_INIT,
68614 EXPORT_TO_INIT_EXIT,
68615 + DATA_TO_TEXT
68616 };
68617
68618 struct sectioncheck {
68619 @@ -1004,6 +1005,12 @@ const struct sectioncheck sectioncheck[]
68620 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
68621 .mismatch = EXPORT_TO_INIT_EXIT,
68622 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
68623 +},
68624 +/* Do not reference code from writable data */
68625 +{
68626 + .fromsec = { DATA_SECTIONS, NULL },
68627 + .tosec = { TEXT_SECTIONS, NULL },
68628 + .mismatch = DATA_TO_TEXT
68629 }
68630 };
68631
68632 @@ -1126,10 +1133,10 @@ static Elf_Sym *find_elf_symbol(struct e
68633 continue;
68634 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
68635 continue;
68636 - if (sym->st_value == addr)
68637 - return sym;
68638 /* Find a symbol nearby - addr are maybe negative */
68639 d = sym->st_value - addr;
68640 + if (d == 0)
68641 + return sym;
68642 if (d < 0)
68643 d = addr - sym->st_value;
68644 if (d < distance) {
68645 @@ -1408,6 +1415,14 @@ static void report_sec_mismatch(const ch
68646 tosym, prl_to, prl_to, tosym);
68647 free(prl_to);
68648 break;
68649 + case DATA_TO_TEXT:
68650 +/*
68651 + fprintf(stderr,
68652 + "The variable %s references\n"
68653 + "the %s %s%s%s\n",
68654 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68655 +*/
68656 + break;
68657 }
68658 fprintf(stderr, "\n");
68659 }
68660 @@ -1633,7 +1648,7 @@ static void section_rel(const char *modn
68661 static void check_sec_ref(struct module *mod, const char *modname,
68662 struct elf_info *elf)
68663 {
68664 - int i;
68665 + unsigned int i;
68666 Elf_Shdr *sechdrs = elf->sechdrs;
68667
68668 /* Walk through all sections */
68669 @@ -1731,7 +1746,7 @@ void __attribute__((format(printf, 2, 3)
68670 va_end(ap);
68671 }
68672
68673 -void buf_write(struct buffer *buf, const char *s, int len)
68674 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68675 {
68676 if (buf->size - buf->pos < len) {
68677 buf->size += len + SZ;
68678 @@ -1943,7 +1958,7 @@ static void write_if_changed(struct buff
68679 if (fstat(fileno(file), &st) < 0)
68680 goto close_write;
68681
68682 - if (st.st_size != b->pos)
68683 + if (st.st_size != (off_t)b->pos)
68684 goto close_write;
68685
68686 tmp = NOFAIL(malloc(b->pos));
68687 diff -urNp linux-2.6.39.4/scripts/mod/modpost.h linux-2.6.39.4/scripts/mod/modpost.h
68688 --- linux-2.6.39.4/scripts/mod/modpost.h 2011-05-19 00:06:34.000000000 -0400
68689 +++ linux-2.6.39.4/scripts/mod/modpost.h 2011-08-05 19:44:37.000000000 -0400
68690 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68691
68692 struct buffer {
68693 char *p;
68694 - int pos;
68695 - int size;
68696 + unsigned int pos;
68697 + unsigned int size;
68698 };
68699
68700 void __attribute__((format(printf, 2, 3)))
68701 buf_printf(struct buffer *buf, const char *fmt, ...);
68702
68703 void
68704 -buf_write(struct buffer *buf, const char *s, int len);
68705 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68706
68707 struct module {
68708 struct module *next;
68709 diff -urNp linux-2.6.39.4/scripts/mod/sumversion.c linux-2.6.39.4/scripts/mod/sumversion.c
68710 --- linux-2.6.39.4/scripts/mod/sumversion.c 2011-05-19 00:06:34.000000000 -0400
68711 +++ linux-2.6.39.4/scripts/mod/sumversion.c 2011-08-05 19:44:37.000000000 -0400
68712 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68713 goto out;
68714 }
68715
68716 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68717 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68718 warn("writing sum in %s failed: %s\n",
68719 filename, strerror(errno));
68720 goto out;
68721 diff -urNp linux-2.6.39.4/scripts/pnmtologo.c linux-2.6.39.4/scripts/pnmtologo.c
68722 --- linux-2.6.39.4/scripts/pnmtologo.c 2011-05-19 00:06:34.000000000 -0400
68723 +++ linux-2.6.39.4/scripts/pnmtologo.c 2011-08-05 19:44:37.000000000 -0400
68724 @@ -237,14 +237,14 @@ static void write_header(void)
68725 fprintf(out, " * Linux logo %s\n", logoname);
68726 fputs(" */\n\n", out);
68727 fputs("#include <linux/linux_logo.h>\n\n", out);
68728 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68729 + fprintf(out, "static unsigned char %s_data[] = {\n",
68730 logoname);
68731 }
68732
68733 static void write_footer(void)
68734 {
68735 fputs("\n};\n\n", out);
68736 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68737 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68738 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68739 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68740 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68741 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68742 fputs("\n};\n\n", out);
68743
68744 /* write logo clut */
68745 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68746 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68747 logoname);
68748 write_hex_cnt = 0;
68749 for (i = 0; i < logo_clutsize; i++) {
68750 diff -urNp linux-2.6.39.4/security/apparmor/lsm.c linux-2.6.39.4/security/apparmor/lsm.c
68751 --- linux-2.6.39.4/security/apparmor/lsm.c 2011-06-25 12:55:23.000000000 -0400
68752 +++ linux-2.6.39.4/security/apparmor/lsm.c 2011-08-05 20:34:06.000000000 -0400
68753 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68754 return error;
68755 }
68756
68757 -static struct security_operations apparmor_ops = {
68758 +static struct security_operations apparmor_ops __read_only = {
68759 .name = "apparmor",
68760
68761 .ptrace_access_check = apparmor_ptrace_access_check,
68762 diff -urNp linux-2.6.39.4/security/commoncap.c linux-2.6.39.4/security/commoncap.c
68763 --- linux-2.6.39.4/security/commoncap.c 2011-05-19 00:06:34.000000000 -0400
68764 +++ linux-2.6.39.4/security/commoncap.c 2011-08-05 19:44:37.000000000 -0400
68765 @@ -28,6 +28,7 @@
68766 #include <linux/prctl.h>
68767 #include <linux/securebits.h>
68768 #include <linux/user_namespace.h>
68769 +#include <net/sock.h>
68770
68771 /*
68772 * If a non-root user executes a setuid-root binary in
68773 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68774
68775 int cap_netlink_recv(struct sk_buff *skb, int cap)
68776 {
68777 - if (!cap_raised(current_cap(), cap))
68778 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68779 return -EPERM;
68780 return 0;
68781 }
68782 @@ -580,6 +581,9 @@ int cap_bprm_secureexec(struct linux_bin
68783 {
68784 const struct cred *cred = current_cred();
68785
68786 + if (gr_acl_enable_at_secure())
68787 + return 1;
68788 +
68789 if (cred->uid != 0) {
68790 if (bprm->cap_effective)
68791 return 1;
68792 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_api.c linux-2.6.39.4/security/integrity/ima/ima_api.c
68793 --- linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-05-19 00:06:34.000000000 -0400
68794 +++ linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-08-05 19:44:37.000000000 -0400
68795 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68796 int result;
68797
68798 /* can overflow, only indicator */
68799 - atomic_long_inc(&ima_htable.violations);
68800 + atomic_long_inc_unchecked(&ima_htable.violations);
68801
68802 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68803 if (!entry) {
68804 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_fs.c linux-2.6.39.4/security/integrity/ima/ima_fs.c
68805 --- linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-05-19 00:06:34.000000000 -0400
68806 +++ linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-08-05 19:44:37.000000000 -0400
68807 @@ -28,12 +28,12 @@
68808 static int valid_policy = 1;
68809 #define TMPBUFLEN 12
68810 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68811 - loff_t *ppos, atomic_long_t *val)
68812 + loff_t *ppos, atomic_long_unchecked_t *val)
68813 {
68814 char tmpbuf[TMPBUFLEN];
68815 ssize_t len;
68816
68817 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68818 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68819 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68820 }
68821
68822 diff -urNp linux-2.6.39.4/security/integrity/ima/ima.h linux-2.6.39.4/security/integrity/ima/ima.h
68823 --- linux-2.6.39.4/security/integrity/ima/ima.h 2011-05-19 00:06:34.000000000 -0400
68824 +++ linux-2.6.39.4/security/integrity/ima/ima.h 2011-08-05 19:44:37.000000000 -0400
68825 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68826 extern spinlock_t ima_queue_lock;
68827
68828 struct ima_h_table {
68829 - atomic_long_t len; /* number of stored measurements in the list */
68830 - atomic_long_t violations;
68831 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68832 + atomic_long_unchecked_t violations;
68833 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68834 };
68835 extern struct ima_h_table ima_htable;
68836 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_queue.c linux-2.6.39.4/security/integrity/ima/ima_queue.c
68837 --- linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-05-19 00:06:34.000000000 -0400
68838 +++ linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-08-05 19:44:37.000000000 -0400
68839 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68840 INIT_LIST_HEAD(&qe->later);
68841 list_add_tail_rcu(&qe->later, &ima_measurements);
68842
68843 - atomic_long_inc(&ima_htable.len);
68844 + atomic_long_inc_unchecked(&ima_htable.len);
68845 key = ima_hash_key(entry->digest);
68846 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68847 return 0;
68848 diff -urNp linux-2.6.39.4/security/Kconfig linux-2.6.39.4/security/Kconfig
68849 --- linux-2.6.39.4/security/Kconfig 2011-05-19 00:06:34.000000000 -0400
68850 +++ linux-2.6.39.4/security/Kconfig 2011-08-05 19:44:37.000000000 -0400
68851 @@ -4,6 +4,554 @@
68852
68853 menu "Security options"
68854
68855 +source grsecurity/Kconfig
68856 +
68857 +menu "PaX"
68858 +
68859 + config ARCH_TRACK_EXEC_LIMIT
68860 + bool
68861 +
68862 + config PAX_PER_CPU_PGD
68863 + bool
68864 +
68865 + config TASK_SIZE_MAX_SHIFT
68866 + int
68867 + depends on X86_64
68868 + default 47 if !PAX_PER_CPU_PGD
68869 + default 42 if PAX_PER_CPU_PGD
68870 +
68871 + config PAX_ENABLE_PAE
68872 + bool
68873 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68874 +
68875 +config PAX
68876 + bool "Enable various PaX features"
68877 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68878 + help
68879 + This allows you to enable various PaX features. PaX adds
68880 + intrusion prevention mechanisms to the kernel that reduce
68881 + the risks posed by exploitable memory corruption bugs.
68882 +
68883 +menu "PaX Control"
68884 + depends on PAX
68885 +
68886 +config PAX_SOFTMODE
68887 + bool 'Support soft mode'
68888 + select PAX_PT_PAX_FLAGS
68889 + help
68890 + Enabling this option will allow you to run PaX in soft mode, that
68891 + is, PaX features will not be enforced by default, only on executables
68892 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68893 + is the only way to mark executables for soft mode use.
68894 +
68895 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68896 + line option on boot. Furthermore you can control various PaX features
68897 + at runtime via the entries in /proc/sys/kernel/pax.
68898 +
68899 +config PAX_EI_PAX
68900 + bool 'Use legacy ELF header marking'
68901 + help
68902 + Enabling this option will allow you to control PaX features on
68903 + a per executable basis via the 'chpax' utility available at
68904 + http://pax.grsecurity.net/. The control flags will be read from
68905 + an otherwise reserved part of the ELF header. This marking has
68906 + numerous drawbacks (no support for soft-mode, toolchain does not
68907 + know about the non-standard use of the ELF header) therefore it
68908 + has been deprecated in favour of PT_PAX_FLAGS support.
68909 +
68910 + Note that if you enable PT_PAX_FLAGS marking support as well,
68911 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
68912 +
68913 +config PAX_PT_PAX_FLAGS
68914 + bool 'Use ELF program header marking'
68915 + help
68916 + Enabling this option will allow you to control PaX features on
68917 + a per executable basis via the 'paxctl' utility available at
68918 + http://pax.grsecurity.net/. The control flags will be read from
68919 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
68920 + has the benefits of supporting both soft mode and being fully
68921 + integrated into the toolchain (the binutils patch is available
68922 + from http://pax.grsecurity.net).
68923 +
68924 + If your toolchain does not support PT_PAX_FLAGS markings,
68925 + you can create one in most cases with 'paxctl -C'.
68926 +
68927 + Note that if you enable the legacy EI_PAX marking support as well,
68928 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
68929 +
68930 +choice
68931 + prompt 'MAC system integration'
68932 + default PAX_HAVE_ACL_FLAGS
68933 + help
68934 + Mandatory Access Control systems have the option of controlling
68935 + PaX flags on a per executable basis, choose the method supported
68936 + by your particular system.
68937 +
68938 + - "none": if your MAC system does not interact with PaX,
68939 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
68940 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
68941 +
68942 + NOTE: this option is for developers/integrators only.
68943 +
68944 + config PAX_NO_ACL_FLAGS
68945 + bool 'none'
68946 +
68947 + config PAX_HAVE_ACL_FLAGS
68948 + bool 'direct'
68949 +
68950 + config PAX_HOOK_ACL_FLAGS
68951 + bool 'hook'
68952 +endchoice
68953 +
68954 +endmenu
68955 +
68956 +menu "Non-executable pages"
68957 + depends on PAX
68958 +
68959 +config PAX_NOEXEC
68960 + bool "Enforce non-executable pages"
68961 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
68962 + help
68963 + By design some architectures do not allow for protecting memory
68964 + pages against execution or even if they do, Linux does not make
68965 + use of this feature. In practice this means that if a page is
68966 + readable (such as the stack or heap) it is also executable.
68967 +
68968 + There is a well known exploit technique that makes use of this
68969 + fact and a common programming mistake where an attacker can
68970 + introduce code of his choice somewhere in the attacked program's
68971 + memory (typically the stack or the heap) and then execute it.
68972 +
68973 + If the attacked program was running with different (typically
68974 + higher) privileges than that of the attacker, then he can elevate
68975 + his own privilege level (e.g. get a root shell, write to files for
68976 + which he does not have write access to, etc).
68977 +
68978 + Enabling this option will let you choose from various features
68979 + that prevent the injection and execution of 'foreign' code in
68980 + a program.
68981 +
68982 + This will also break programs that rely on the old behaviour and
68983 + expect that dynamically allocated memory via the malloc() family
68984 + of functions is executable (which it is not). Notable examples
68985 + are the XFree86 4.x server, the java runtime and wine.
68986 +
68987 +config PAX_PAGEEXEC
68988 + bool "Paging based non-executable pages"
68989 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
68990 + select S390_SWITCH_AMODE if S390
68991 + select S390_EXEC_PROTECT if S390
68992 + select ARCH_TRACK_EXEC_LIMIT if X86_32
68993 + help
68994 + This implementation is based on the paging feature of the CPU.
68995 + On i386 without hardware non-executable bit support there is a
68996 + variable but usually low performance impact, however on Intel's
68997 + P4 core based CPUs it is very high so you should not enable this
68998 + for kernels meant to be used on such CPUs.
68999 +
69000 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
69001 + with hardware non-executable bit support there is no performance
69002 + impact, on ppc the impact is negligible.
69003 +
69004 + Note that several architectures require various emulations due to
69005 + badly designed userland ABIs, this will cause a performance impact
69006 + but will disappear as soon as userland is fixed. For example, ppc
69007 + userland MUST have been built with secure-plt by a recent toolchain.
69008 +
69009 +config PAX_SEGMEXEC
69010 + bool "Segmentation based non-executable pages"
69011 + depends on PAX_NOEXEC && X86_32
69012 + help
69013 + This implementation is based on the segmentation feature of the
69014 + CPU and has a very small performance impact, however applications
69015 + will be limited to a 1.5 GB address space instead of the normal
69016 + 3 GB.
69017 +
69018 +config PAX_EMUTRAMP
69019 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
69020 + default y if PARISC
69021 + help
69022 + There are some programs and libraries that for one reason or
69023 + another attempt to execute special small code snippets from
69024 + non-executable memory pages. Most notable examples are the
69025 + signal handler return code generated by the kernel itself and
69026 + the GCC trampolines.
69027 +
69028 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
69029 + such programs will no longer work under your kernel.
69030 +
69031 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
69032 + utilities to enable trampoline emulation for the affected programs
69033 + yet still have the protection provided by the non-executable pages.
69034 +
69035 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
69036 + your system will not even boot.
69037 +
69038 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
69039 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
69040 + for the affected files.
69041 +
69042 + NOTE: enabling this feature *may* open up a loophole in the
69043 + protection provided by non-executable pages that an attacker
69044 + could abuse. Therefore the best solution is to not have any
69045 + files on your system that would require this option. This can
69046 + be achieved by not using libc5 (which relies on the kernel
69047 + signal handler return code) and not using or rewriting programs
69048 + that make use of the nested function implementation of GCC.
69049 + Skilled users can just fix GCC itself so that it implements
69050 + nested function calls in a way that does not interfere with PaX.
69051 +
69052 +config PAX_EMUSIGRT
69053 + bool "Automatically emulate sigreturn trampolines"
69054 + depends on PAX_EMUTRAMP && PARISC
69055 + default y
69056 + help
69057 + Enabling this option will have the kernel automatically detect
69058 + and emulate signal return trampolines executing on the stack
69059 + that would otherwise lead to task termination.
69060 +
69061 + This solution is intended as a temporary one for users with
69062 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
69063 + Modula-3 runtime, etc) or executables linked to such, basically
69064 + everything that does not specify its own SA_RESTORER function in
69065 + normal executable memory like glibc 2.1+ does.
69066 +
69067 + On parisc you MUST enable this option, otherwise your system will
69068 + not even boot.
69069 +
69070 + NOTE: this feature cannot be disabled on a per executable basis
69071 + and since it *does* open up a loophole in the protection provided
69072 + by non-executable pages, the best solution is to not have any
69073 + files on your system that would require this option.
69074 +
69075 +config PAX_MPROTECT
69076 + bool "Restrict mprotect()"
69077 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
69078 + help
69079 + Enabling this option will prevent programs from
69080 + - changing the executable status of memory pages that were
69081 + not originally created as executable,
69082 + - making read-only executable pages writable again,
69083 + - creating executable pages from anonymous memory,
69084 + - making read-only-after-relocations (RELRO) data pages writable again.
69085 +
69086 + You should say Y here to complete the protection provided by
69087 + the enforcement of non-executable pages.
69088 +
69089 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69090 + this feature on a per file basis.
69091 +
69092 +config PAX_MPROTECT_COMPAT
69093 + bool "Use legacy/compat protection demoting (read help)"
69094 + depends on PAX_MPROTECT
69095 + default n
69096 + help
69097 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
69098 + by sending the proper error code to the application. For some broken
69099 + userland, this can cause problems with Python or other applications. The
69100 + current implementation however allows for applications like clamav to
69101 + detect if JIT compilation/execution is allowed and to fall back gracefully
69102 + to an interpreter-based mode if it does not. While we encourage everyone
69103 + to use the current implementation as-is and push upstream to fix broken
69104 + userland (note that the RWX logging option can assist with this), in some
69105 + environments this may not be possible. Having to disable MPROTECT
69106 + completely on certain binaries reduces the security benefit of PaX,
69107 + so this option is provided for those environments to revert to the old
69108 + behavior.
69109 +
69110 +config PAX_ELFRELOCS
69111 + bool "Allow ELF text relocations (read help)"
69112 + depends on PAX_MPROTECT
69113 + default n
69114 + help
69115 + Non-executable pages and mprotect() restrictions are effective
69116 + in preventing the introduction of new executable code into an
69117 + attacked task's address space. There remain only two venues
69118 + for this kind of attack: if the attacker can execute already
69119 + existing code in the attacked task then he can either have it
69120 + create and mmap() a file containing his code or have it mmap()
69121 + an already existing ELF library that does not have position
69122 + independent code in it and use mprotect() on it to make it
69123 + writable and copy his code there. While protecting against
69124 + the former approach is beyond PaX, the latter can be prevented
69125 + by having only PIC ELF libraries on one's system (which do not
69126 + need to relocate their code). If you are sure this is your case,
69127 + as is the case with all modern Linux distributions, then leave
69128 + this option disabled. You should say 'n' here.
69129 +
69130 +config PAX_ETEXECRELOCS
69131 + bool "Allow ELF ET_EXEC text relocations"
69132 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
69133 + select PAX_ELFRELOCS
69134 + default y
69135 + help
69136 + On some architectures there are incorrectly created applications
69137 + that require text relocations and would not work without enabling
69138 + this option. If you are an alpha, ia64 or parisc user, you should
69139 + enable this option and disable it once you have made sure that
69140 + none of your applications need it.
69141 +
69142 +config PAX_EMUPLT
69143 + bool "Automatically emulate ELF PLT"
69144 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
69145 + default y
69146 + help
69147 + Enabling this option will have the kernel automatically detect
69148 + and emulate the Procedure Linkage Table entries in ELF files.
69149 + On some architectures such entries are in writable memory, and
69150 + become non-executable leading to task termination. Therefore
69151 + it is mandatory that you enable this option on alpha, parisc,
69152 + sparc and sparc64, otherwise your system would not even boot.
69153 +
69154 + NOTE: this feature *does* open up a loophole in the protection
69155 + provided by the non-executable pages, therefore the proper
69156 + solution is to modify the toolchain to produce a PLT that does
69157 + not need to be writable.
69158 +
69159 +config PAX_DLRESOLVE
69160 + bool 'Emulate old glibc resolver stub'
69161 + depends on PAX_EMUPLT && SPARC
69162 + default n
69163 + help
69164 + This option is needed if userland has an old glibc (before 2.4)
69165 + that puts a 'save' instruction into the runtime generated resolver
69166 + stub that needs special emulation.
69167 +
69168 +config PAX_KERNEXEC
69169 + bool "Enforce non-executable kernel pages"
69170 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
69171 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
69172 + help
69173 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
69174 + that is, enabling this option will make it harder to inject
69175 + and execute 'foreign' code in kernel memory itself.
69176 +
69177 + Note that on x86_64 kernels there is a known regression when
69178 + this feature and KVM/VMX are both enabled in the host kernel.
69179 +
69180 +config PAX_KERNEXEC_MODULE_TEXT
69181 + int "Minimum amount of memory reserved for module code"
69182 + default "4"
69183 + depends on PAX_KERNEXEC && X86_32 && MODULES
69184 + help
69185 + Due to implementation details the kernel must reserve a fixed
69186 + amount of memory for module code at compile time that cannot be
69187 + changed at runtime. Here you can specify the minimum amount
69188 + in MB that will be reserved. Due to the same implementation
69189 + details this size will always be rounded up to the next 2/4 MB
69190 + boundary (depends on PAE) so the actually available memory for
69191 + module code will usually be more than this minimum.
69192 +
69193 + The default 4 MB should be enough for most users but if you have
69194 + an excessive number of modules (e.g., most distribution configs
69195 + compile many drivers as modules) or use huge modules such as
69196 + nvidia's kernel driver, you will need to adjust this amount.
69197 + A good rule of thumb is to look at your currently loaded kernel
69198 + modules and add up their sizes.
69199 +
69200 +endmenu
69201 +
69202 +menu "Address Space Layout Randomization"
69203 + depends on PAX
69204 +
69205 +config PAX_ASLR
69206 + bool "Address Space Layout Randomization"
69207 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
69208 + help
69209 + Many if not most exploit techniques rely on the knowledge of
69210 + certain addresses in the attacked program. The following options
69211 + will allow the kernel to apply a certain amount of randomization
69212 + to specific parts of the program thereby forcing an attacker to
69213 + guess them in most cases. Any failed guess will most likely crash
69214 + the attacked program which allows the kernel to detect such attempts
69215 + and react on them. PaX itself provides no reaction mechanisms,
69216 + instead it is strongly encouraged that you make use of Nergal's
69217 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
69218 + (http://www.grsecurity.net/) built-in crash detection features or
69219 + develop one yourself.
69220 +
69221 + By saying Y here you can choose to randomize the following areas:
69222 + - top of the task's kernel stack
69223 + - top of the task's userland stack
69224 + - base address for mmap() requests that do not specify one
69225 + (this includes all libraries)
69226 + - base address of the main executable
69227 +
69228 + It is strongly recommended to say Y here as address space layout
69229 + randomization has negligible impact on performance yet it provides
69230 + a very effective protection.
69231 +
69232 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69233 + this feature on a per file basis.
69234 +
69235 +config PAX_RANDKSTACK
69236 + bool "Randomize kernel stack base"
69237 + depends on PAX_ASLR && X86_TSC && X86
69238 + help
69239 + By saying Y here the kernel will randomize every task's kernel
69240 + stack on every system call. This will not only force an attacker
69241 + to guess it but also prevent him from making use of possible
69242 + leaked information about it.
69243 +
69244 + Since the kernel stack is a rather scarce resource, randomization
69245 + may cause unexpected stack overflows, therefore you should very
69246 + carefully test your system. Note that once enabled in the kernel
69247 + configuration, this feature cannot be disabled on a per file basis.
69248 +
69249 +config PAX_RANDUSTACK
69250 + bool "Randomize user stack base"
69251 + depends on PAX_ASLR
69252 + help
69253 + By saying Y here the kernel will randomize every task's userland
69254 + stack. The randomization is done in two steps where the second
69255 + one may apply a big amount of shift to the top of the stack and
69256 + cause problems for programs that want to use lots of memory (more
69257 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
69258 + For this reason the second step can be controlled by 'chpax' or
69259 + 'paxctl' on a per file basis.
69260 +
69261 +config PAX_RANDMMAP
69262 + bool "Randomize mmap() base"
69263 + depends on PAX_ASLR
69264 + help
69265 + By saying Y here the kernel will use a randomized base address for
69266 + mmap() requests that do not specify one themselves. As a result
69267 + all dynamically loaded libraries will appear at random addresses
69268 + and therefore be harder to exploit by a technique where an attacker
69269 + attempts to execute library code for his purposes (e.g. spawn a
69270 + shell from an exploited program that is running at an elevated
69271 + privilege level).
69272 +
69273 + Furthermore, if a program is relinked as a dynamic ELF file, its
69274 + base address will be randomized as well, completing the full
69275 + randomization of the address space layout. Attacking such programs
69276 + becomes a guess game. You can find an example of doing this at
69277 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
69278 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
69279 +
69280 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
69281 + feature on a per file basis.
69282 +
69283 +endmenu
69284 +
69285 +menu "Miscellaneous hardening features"
69286 +
69287 +config PAX_MEMORY_SANITIZE
69288 + bool "Sanitize all freed memory"
69289 + help
69290 + By saying Y here the kernel will erase memory pages as soon as they
69291 + are freed. This in turn reduces the lifetime of data stored in the
69292 + pages, making it less likely that sensitive information such as
69293 + passwords, cryptographic secrets, etc stay in memory for too long.
69294 +
69295 + This is especially useful for programs whose runtime is short, long
69296 + lived processes and the kernel itself benefit from this as long as
69297 + they operate on whole memory pages and ensure timely freeing of pages
69298 + that may hold sensitive information.
69299 +
69300 + The tradeoff is performance impact, on a single CPU system kernel
69301 + compilation sees a 3% slowdown, other systems and workloads may vary
69302 + and you are advised to test this feature on your expected workload
69303 + before deploying it.
69304 +
69305 + Note that this feature does not protect data stored in live pages,
69306 + e.g., process memory swapped to disk may stay there for a long time.
69307 +
69308 +config PAX_MEMORY_STACKLEAK
69309 + bool "Sanitize kernel stack"
69310 + depends on X86
69311 + help
69312 + By saying Y here the kernel will erase the kernel stack before it
69313 + returns from a system call. This in turn reduces the information
69314 + that a kernel stack leak bug can reveal.
69315 +
69316 + Note that such a bug can still leak information that was put on
69317 + the stack by the current system call (the one eventually triggering
69318 + the bug) but traces of earlier system calls on the kernel stack
69319 + cannot leak anymore.
69320 +
69321 + The tradeoff is performance impact: on a single CPU system kernel
69322 + compilation sees a 1% slowdown, other systems and workloads may vary
69323 + and you are advised to test this feature on your expected workload
69324 + before deploying it.
69325 +
69326 + Note: full support for this feature requires gcc with plugin support
69327 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
69328 + is not supported). Using older gcc versions means that functions
69329 + with large enough stack frames may leave uninitialized memory behind
69330 + that may be exposed to a later syscall leaking the stack.
69331 +
69332 +config PAX_MEMORY_UDEREF
69333 + bool "Prevent invalid userland pointer dereference"
69334 + depends on X86 && !UML_X86 && !XEN
69335 + select PAX_PER_CPU_PGD if X86_64
69336 + help
69337 + By saying Y here the kernel will be prevented from dereferencing
69338 + userland pointers in contexts where the kernel expects only kernel
69339 + pointers. This is both a useful runtime debugging feature and a
69340 + security measure that prevents exploiting a class of kernel bugs.
69341 +
69342 + The tradeoff is that some virtualization solutions may experience
69343 + a huge slowdown and therefore you should not enable this feature
69344 + for kernels meant to run in such environments. Whether a given VM
69345 + solution is affected or not is best determined by simply trying it
69346 + out, the performance impact will be obvious right on boot as this
69347 + mechanism engages from very early on. A good rule of thumb is that
69348 + VMs running on CPUs without hardware virtualization support (i.e.,
69349 + the majority of IA-32 CPUs) will likely experience the slowdown.
69350 +
69351 +config PAX_REFCOUNT
69352 + bool "Prevent various kernel object reference counter overflows"
69353 + depends on GRKERNSEC && (X86 || SPARC64)
69354 + help
69355 + By saying Y here the kernel will detect and prevent overflowing
69356 + various (but not all) kinds of object reference counters. Such
69357 + overflows can normally occur due to bugs only and are often, if
69358 + not always, exploitable.
69359 +
69360 + The tradeoff is that data structures protected by an overflowed
69361 + refcount will never be freed and therefore will leak memory. Note
69362 + that this leak also happens even without this protection but in
69363 + that case the overflow can eventually trigger the freeing of the
69364 + data structure while it is still being used elsewhere, resulting
69365 + in the exploitable situation that this feature prevents.
69366 +
69367 + Since this has a negligible performance impact, you should enable
69368 + this feature.
69369 +
69370 +config PAX_USERCOPY
69371 + bool "Harden heap object copies between kernel and userland"
69372 + depends on X86 || PPC || SPARC || ARM
69373 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
69374 + help
69375 + By saying Y here the kernel will enforce the size of heap objects
69376 + when they are copied in either direction between the kernel and
69377 + userland, even if only a part of the heap object is copied.
69378 +
69379 + Specifically, this checking prevents information leaking from the
69380 + kernel heap during kernel to userland copies (if the kernel heap
69381 + object is otherwise fully initialized) and prevents kernel heap
69382 + overflows during userland to kernel copies.
69383 +
69384 + Note that the current implementation provides the strictest bounds
69385 + checks for the SLUB allocator.
69386 +
69387 + Enabling this option also enables per-slab cache protection against
69388 + data in a given cache being copied into/out of via userland
69389 + accessors. Though the whitelist of regions will be reduced over
69390 + time, it notably protects important data structures like task structs.
69391 +
69392 + If frame pointers are enabled on x86, this option will also restrict
69393 + copies into and out of the kernel stack to local variables within a
69394 + single frame.
69395 +
69396 + Since this has a negligible performance impact, you should enable
69397 + this feature.
69398 +
69399 +endmenu
69400 +
69401 +endmenu
69402 +
69403 config KEYS
69404 bool "Enable access key retention support"
69405 help
69406 @@ -167,7 +715,7 @@ config INTEL_TXT
69407 config LSM_MMAP_MIN_ADDR
69408 int "Low address space for LSM to protect from user allocation"
69409 depends on SECURITY && SECURITY_SELINUX
69410 - default 65536
69411 + default 32768
69412 help
69413 This is the portion of low virtual memory which should be protected
69414 from userspace allocation. Keeping a user from writing to low pages
69415 diff -urNp linux-2.6.39.4/security/keys/keyring.c linux-2.6.39.4/security/keys/keyring.c
69416 --- linux-2.6.39.4/security/keys/keyring.c 2011-05-19 00:06:34.000000000 -0400
69417 +++ linux-2.6.39.4/security/keys/keyring.c 2011-08-05 19:44:37.000000000 -0400
69418 @@ -213,15 +213,15 @@ static long keyring_read(const struct ke
69419 ret = -EFAULT;
69420
69421 for (loop = 0; loop < klist->nkeys; loop++) {
69422 + key_serial_t serial;
69423 key = klist->keys[loop];
69424 + serial = key->serial;
69425
69426 tmp = sizeof(key_serial_t);
69427 if (tmp > buflen)
69428 tmp = buflen;
69429
69430 - if (copy_to_user(buffer,
69431 - &key->serial,
69432 - tmp) != 0)
69433 + if (copy_to_user(buffer, &serial, tmp))
69434 goto error;
69435
69436 buflen -= tmp;
69437 diff -urNp linux-2.6.39.4/security/min_addr.c linux-2.6.39.4/security/min_addr.c
69438 --- linux-2.6.39.4/security/min_addr.c 2011-05-19 00:06:34.000000000 -0400
69439 +++ linux-2.6.39.4/security/min_addr.c 2011-08-05 19:44:37.000000000 -0400
69440 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
69441 */
69442 static void update_mmap_min_addr(void)
69443 {
69444 +#ifndef SPARC
69445 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
69446 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
69447 mmap_min_addr = dac_mmap_min_addr;
69448 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
69449 #else
69450 mmap_min_addr = dac_mmap_min_addr;
69451 #endif
69452 +#endif
69453 }
69454
69455 /*
69456 diff -urNp linux-2.6.39.4/security/security.c linux-2.6.39.4/security/security.c
69457 --- linux-2.6.39.4/security/security.c 2011-05-19 00:06:34.000000000 -0400
69458 +++ linux-2.6.39.4/security/security.c 2011-08-05 19:44:37.000000000 -0400
69459 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
69460 /* things that live in capability.c */
69461 extern void __init security_fixup_ops(struct security_operations *ops);
69462
69463 -static struct security_operations *security_ops;
69464 -static struct security_operations default_security_ops = {
69465 +static struct security_operations *security_ops __read_only;
69466 +static struct security_operations default_security_ops __read_only = {
69467 .name = "default",
69468 };
69469
69470 @@ -67,7 +67,9 @@ int __init security_init(void)
69471
69472 void reset_security_ops(void)
69473 {
69474 + pax_open_kernel();
69475 security_ops = &default_security_ops;
69476 + pax_close_kernel();
69477 }
69478
69479 /* Save user chosen LSM */
69480 diff -urNp linux-2.6.39.4/security/selinux/hooks.c linux-2.6.39.4/security/selinux/hooks.c
69481 --- linux-2.6.39.4/security/selinux/hooks.c 2011-05-19 00:06:34.000000000 -0400
69482 +++ linux-2.6.39.4/security/selinux/hooks.c 2011-08-05 19:44:37.000000000 -0400
69483 @@ -93,7 +93,6 @@
69484 #define NUM_SEL_MNT_OPTS 5
69485
69486 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
69487 -extern struct security_operations *security_ops;
69488
69489 /* SECMARK reference count */
69490 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
69491 @@ -5431,7 +5430,7 @@ static int selinux_key_getsecurity(struc
69492
69493 #endif
69494
69495 -static struct security_operations selinux_ops = {
69496 +static struct security_operations selinux_ops __read_only = {
69497 .name = "selinux",
69498
69499 .ptrace_access_check = selinux_ptrace_access_check,
69500 diff -urNp linux-2.6.39.4/security/selinux/include/xfrm.h linux-2.6.39.4/security/selinux/include/xfrm.h
69501 --- linux-2.6.39.4/security/selinux/include/xfrm.h 2011-05-19 00:06:34.000000000 -0400
69502 +++ linux-2.6.39.4/security/selinux/include/xfrm.h 2011-08-05 19:44:37.000000000 -0400
69503 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
69504
69505 static inline void selinux_xfrm_notify_policyload(void)
69506 {
69507 - atomic_inc(&flow_cache_genid);
69508 + atomic_inc_unchecked(&flow_cache_genid);
69509 }
69510 #else
69511 static inline int selinux_xfrm_enabled(void)
69512 diff -urNp linux-2.6.39.4/security/selinux/ss/services.c linux-2.6.39.4/security/selinux/ss/services.c
69513 --- linux-2.6.39.4/security/selinux/ss/services.c 2011-05-19 00:06:34.000000000 -0400
69514 +++ linux-2.6.39.4/security/selinux/ss/services.c 2011-08-05 19:44:37.000000000 -0400
69515 @@ -1806,6 +1806,8 @@ int security_load_policy(void *data, siz
69516 int rc = 0;
69517 struct policy_file file = { data, len }, *fp = &file;
69518
69519 + pax_track_stack();
69520 +
69521 if (!ss_initialized) {
69522 avtab_cache_init();
69523 rc = policydb_read(&policydb, fp);
69524 diff -urNp linux-2.6.39.4/security/smack/smack_lsm.c linux-2.6.39.4/security/smack/smack_lsm.c
69525 --- linux-2.6.39.4/security/smack/smack_lsm.c 2011-05-19 00:06:34.000000000 -0400
69526 +++ linux-2.6.39.4/security/smack/smack_lsm.c 2011-08-05 19:44:37.000000000 -0400
69527 @@ -3386,7 +3386,7 @@ static int smack_inode_getsecctx(struct
69528 return 0;
69529 }
69530
69531 -struct security_operations smack_ops = {
69532 +struct security_operations smack_ops __read_only = {
69533 .name = "smack",
69534
69535 .ptrace_access_check = smack_ptrace_access_check,
69536 diff -urNp linux-2.6.39.4/security/tomoyo/tomoyo.c linux-2.6.39.4/security/tomoyo/tomoyo.c
69537 --- linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-05-19 00:06:34.000000000 -0400
69538 +++ linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-08-05 19:44:37.000000000 -0400
69539 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
69540 * tomoyo_security_ops is a "struct security_operations" which is used for
69541 * registering TOMOYO.
69542 */
69543 -static struct security_operations tomoyo_security_ops = {
69544 +static struct security_operations tomoyo_security_ops __read_only = {
69545 .name = "tomoyo",
69546 .cred_alloc_blank = tomoyo_cred_alloc_blank,
69547 .cred_prepare = tomoyo_cred_prepare,
69548 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.c linux-2.6.39.4/sound/aoa/codecs/onyx.c
69549 --- linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-05-19 00:06:34.000000000 -0400
69550 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-08-05 19:44:37.000000000 -0400
69551 @@ -54,7 +54,7 @@ struct onyx {
69552 spdif_locked:1,
69553 analog_locked:1,
69554 original_mute:2;
69555 - int open_count;
69556 + local_t open_count;
69557 struct codec_info *codec_info;
69558
69559 /* mutex serializes concurrent access to the device
69560 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
69561 struct onyx *onyx = cii->codec_data;
69562
69563 mutex_lock(&onyx->mutex);
69564 - onyx->open_count++;
69565 + local_inc(&onyx->open_count);
69566 mutex_unlock(&onyx->mutex);
69567
69568 return 0;
69569 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
69570 struct onyx *onyx = cii->codec_data;
69571
69572 mutex_lock(&onyx->mutex);
69573 - onyx->open_count--;
69574 - if (!onyx->open_count)
69575 + if (local_dec_and_test(&onyx->open_count))
69576 onyx->spdif_locked = onyx->analog_locked = 0;
69577 mutex_unlock(&onyx->mutex);
69578
69579 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.h linux-2.6.39.4/sound/aoa/codecs/onyx.h
69580 --- linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-05-19 00:06:34.000000000 -0400
69581 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-08-05 19:44:37.000000000 -0400
69582 @@ -11,6 +11,7 @@
69583 #include <linux/i2c.h>
69584 #include <asm/pmac_low_i2c.h>
69585 #include <asm/prom.h>
69586 +#include <asm/local.h>
69587
69588 /* PCM3052 register definitions */
69589
69590 diff -urNp linux-2.6.39.4/sound/core/seq/seq_device.c linux-2.6.39.4/sound/core/seq/seq_device.c
69591 --- linux-2.6.39.4/sound/core/seq/seq_device.c 2011-05-19 00:06:34.000000000 -0400
69592 +++ linux-2.6.39.4/sound/core/seq/seq_device.c 2011-08-05 20:34:06.000000000 -0400
69593 @@ -63,7 +63,7 @@ struct ops_list {
69594 int argsize; /* argument size */
69595
69596 /* operators */
69597 - struct snd_seq_dev_ops ops;
69598 + struct snd_seq_dev_ops *ops;
69599
69600 /* registred devices */
69601 struct list_head dev_list; /* list of devices */
69602 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
69603
69604 mutex_lock(&ops->reg_mutex);
69605 /* copy driver operators */
69606 - ops->ops = *entry;
69607 + ops->ops = entry;
69608 ops->driver |= DRIVER_LOADED;
69609 ops->argsize = argsize;
69610
69611 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
69612 dev->name, ops->id, ops->argsize, dev->argsize);
69613 return -EINVAL;
69614 }
69615 - if (ops->ops.init_device(dev) >= 0) {
69616 + if (ops->ops->init_device(dev) >= 0) {
69617 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
69618 ops->num_init_devices++;
69619 } else {
69620 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
69621 dev->name, ops->id, ops->argsize, dev->argsize);
69622 return -EINVAL;
69623 }
69624 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
69625 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
69626 dev->status = SNDRV_SEQ_DEVICE_FREE;
69627 dev->driver_data = NULL;
69628 ops->num_init_devices--;
69629 diff -urNp linux-2.6.39.4/sound/drivers/mts64.c linux-2.6.39.4/sound/drivers/mts64.c
69630 --- linux-2.6.39.4/sound/drivers/mts64.c 2011-05-19 00:06:34.000000000 -0400
69631 +++ linux-2.6.39.4/sound/drivers/mts64.c 2011-08-05 20:34:06.000000000 -0400
69632 @@ -28,6 +28,7 @@
69633 #include <sound/initval.h>
69634 #include <sound/rawmidi.h>
69635 #include <sound/control.h>
69636 +#include <asm/local.h>
69637
69638 #define CARD_NAME "Miditerminal 4140"
69639 #define DRIVER_NAME "MTS64"
69640 @@ -66,7 +67,7 @@ struct mts64 {
69641 struct pardevice *pardev;
69642 int pardev_claimed;
69643
69644 - int open_count;
69645 + local_t open_count;
69646 int current_midi_output_port;
69647 int current_midi_input_port;
69648 u8 mode[MTS64_NUM_INPUT_PORTS];
69649 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
69650 {
69651 struct mts64 *mts = substream->rmidi->private_data;
69652
69653 - if (mts->open_count == 0) {
69654 + if (local_read(&mts->open_count) == 0) {
69655 /* We don't need a spinlock here, because this is just called
69656 if the device has not been opened before.
69657 So there aren't any IRQs from the device */
69658 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69659
69660 msleep(50);
69661 }
69662 - ++(mts->open_count);
69663 + local_inc(&mts->open_count);
69664
69665 return 0;
69666 }
69667 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69668 struct mts64 *mts = substream->rmidi->private_data;
69669 unsigned long flags;
69670
69671 - --(mts->open_count);
69672 - if (mts->open_count == 0) {
69673 + if (local_dec_return(&mts->open_count) == 0) {
69674 /* We need the spinlock_irqsave here because we can still
69675 have IRQs at this point */
69676 spin_lock_irqsave(&mts->lock, flags);
69677 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69678
69679 msleep(500);
69680
69681 - } else if (mts->open_count < 0)
69682 - mts->open_count = 0;
69683 + } else if (local_read(&mts->open_count) < 0)
69684 + local_set(&mts->open_count, 0);
69685
69686 return 0;
69687 }
69688 diff -urNp linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c
69689 --- linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-05-19 00:06:34.000000000 -0400
69690 +++ linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:34:06.000000000 -0400
69691 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69692 MODULE_DESCRIPTION("OPL4 driver");
69693 MODULE_LICENSE("GPL");
69694
69695 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69696 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69697 {
69698 int timeout = 10;
69699 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69700 diff -urNp linux-2.6.39.4/sound/drivers/portman2x4.c linux-2.6.39.4/sound/drivers/portman2x4.c
69701 --- linux-2.6.39.4/sound/drivers/portman2x4.c 2011-05-19 00:06:34.000000000 -0400
69702 +++ linux-2.6.39.4/sound/drivers/portman2x4.c 2011-08-05 20:34:06.000000000 -0400
69703 @@ -47,6 +47,7 @@
69704 #include <sound/initval.h>
69705 #include <sound/rawmidi.h>
69706 #include <sound/control.h>
69707 +#include <asm/local.h>
69708
69709 #define CARD_NAME "Portman 2x4"
69710 #define DRIVER_NAME "portman"
69711 @@ -84,7 +85,7 @@ struct portman {
69712 struct pardevice *pardev;
69713 int pardev_claimed;
69714
69715 - int open_count;
69716 + local_t open_count;
69717 int mode[PORTMAN_NUM_INPUT_PORTS];
69718 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69719 };
69720 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.c linux-2.6.39.4/sound/firewire/amdtp.c
69721 --- linux-2.6.39.4/sound/firewire/amdtp.c 2011-05-19 00:06:34.000000000 -0400
69722 +++ linux-2.6.39.4/sound/firewire/amdtp.c 2011-08-05 19:44:37.000000000 -0400
69723 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69724 ptr = s->pcm_buffer_pointer + data_blocks;
69725 if (ptr >= pcm->runtime->buffer_size)
69726 ptr -= pcm->runtime->buffer_size;
69727 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69728 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69729
69730 s->pcm_period_pointer += data_blocks;
69731 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69732 @@ -510,7 +510,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69733 */
69734 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69735 {
69736 - ACCESS_ONCE(s->source_node_id_field) =
69737 + ACCESS_ONCE_RW(s->source_node_id_field) =
69738 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69739 }
69740 EXPORT_SYMBOL(amdtp_out_stream_update);
69741 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.h linux-2.6.39.4/sound/firewire/amdtp.h
69742 --- linux-2.6.39.4/sound/firewire/amdtp.h 2011-05-19 00:06:34.000000000 -0400
69743 +++ linux-2.6.39.4/sound/firewire/amdtp.h 2011-08-05 19:44:37.000000000 -0400
69744 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69745 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69746 struct snd_pcm_substream *pcm)
69747 {
69748 - ACCESS_ONCE(s->pcm) = pcm;
69749 + ACCESS_ONCE_RW(s->pcm) = pcm;
69750 }
69751
69752 /**
69753 diff -urNp linux-2.6.39.4/sound/isa/cmi8330.c linux-2.6.39.4/sound/isa/cmi8330.c
69754 --- linux-2.6.39.4/sound/isa/cmi8330.c 2011-05-19 00:06:34.000000000 -0400
69755 +++ linux-2.6.39.4/sound/isa/cmi8330.c 2011-08-05 20:34:06.000000000 -0400
69756 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69757
69758 struct snd_pcm *pcm;
69759 struct snd_cmi8330_stream {
69760 - struct snd_pcm_ops ops;
69761 + snd_pcm_ops_no_const ops;
69762 snd_pcm_open_callback_t open;
69763 void *private_data; /* sb or wss */
69764 } streams[2];
69765 diff -urNp linux-2.6.39.4/sound/oss/sb_audio.c linux-2.6.39.4/sound/oss/sb_audio.c
69766 --- linux-2.6.39.4/sound/oss/sb_audio.c 2011-05-19 00:06:34.000000000 -0400
69767 +++ linux-2.6.39.4/sound/oss/sb_audio.c 2011-08-05 19:44:37.000000000 -0400
69768 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69769 buf16 = (signed short *)(localbuf + localoffs);
69770 while (c)
69771 {
69772 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69773 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69774 if (copy_from_user(lbuf8,
69775 userbuf+useroffs + p,
69776 locallen))
69777 diff -urNp linux-2.6.39.4/sound/oss/swarm_cs4297a.c linux-2.6.39.4/sound/oss/swarm_cs4297a.c
69778 --- linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-05-19 00:06:34.000000000 -0400
69779 +++ linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-08-05 19:44:37.000000000 -0400
69780 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69781 {
69782 struct cs4297a_state *s;
69783 u32 pwr, id;
69784 - mm_segment_t fs;
69785 int rval;
69786 #ifndef CONFIG_BCM_CS4297A_CSWARM
69787 u64 cfg;
69788 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69789 if (!rval) {
69790 char *sb1250_duart_present;
69791
69792 +#if 0
69793 + mm_segment_t fs;
69794 fs = get_fs();
69795 set_fs(KERNEL_DS);
69796 -#if 0
69797 val = SOUND_MASK_LINE;
69798 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69799 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69800 val = initvol[i].vol;
69801 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69802 }
69803 + set_fs(fs);
69804 // cs4297a_write_ac97(s, 0x18, 0x0808);
69805 #else
69806 // cs4297a_write_ac97(s, 0x5e, 0x180);
69807 cs4297a_write_ac97(s, 0x02, 0x0808);
69808 cs4297a_write_ac97(s, 0x18, 0x0808);
69809 #endif
69810 - set_fs(fs);
69811
69812 list_add(&s->list, &cs4297a_devs);
69813
69814 diff -urNp linux-2.6.39.4/sound/pci/hda/hda_codec.h linux-2.6.39.4/sound/pci/hda/hda_codec.h
69815 --- linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-05-19 00:06:34.000000000 -0400
69816 +++ linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-08-05 20:34:06.000000000 -0400
69817 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69818 /* notify power-up/down from codec to controller */
69819 void (*pm_notify)(struct hda_bus *bus);
69820 #endif
69821 -};
69822 +} __no_const;
69823
69824 /* template to pass to the bus constructor */
69825 struct hda_bus_template {
69826 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69827 #endif
69828 void (*reboot_notify)(struct hda_codec *codec);
69829 };
69830 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69831
69832 /* record for amp information cache */
69833 struct hda_cache_head {
69834 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69835 struct snd_pcm_substream *substream);
69836 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69837 struct snd_pcm_substream *substream);
69838 -};
69839 +} __no_const;
69840
69841 /* PCM information for each substream */
69842 struct hda_pcm_stream {
69843 @@ -801,7 +802,7 @@ struct hda_codec {
69844 const char *modelname; /* model name for preset */
69845
69846 /* set by patch */
69847 - struct hda_codec_ops patch_ops;
69848 + hda_codec_ops_no_const patch_ops;
69849
69850 /* PCM to create, set by patch_ops.build_pcms callback */
69851 unsigned int num_pcms;
69852 diff -urNp linux-2.6.39.4/sound/pci/ice1712/ice1712.h linux-2.6.39.4/sound/pci/ice1712/ice1712.h
69853 --- linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-05-19 00:06:34.000000000 -0400
69854 +++ linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-08-05 20:34:06.000000000 -0400
69855 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69856 unsigned int mask_flags; /* total mask bits */
69857 struct snd_akm4xxx_ops {
69858 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69859 - } ops;
69860 + } __no_const ops;
69861 };
69862
69863 struct snd_ice1712_spdif {
69864 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69865 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69866 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69867 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69868 - } ops;
69869 + } __no_const ops;
69870 };
69871
69872
69873 diff -urNp linux-2.6.39.4/sound/pci/intel8x0m.c linux-2.6.39.4/sound/pci/intel8x0m.c
69874 --- linux-2.6.39.4/sound/pci/intel8x0m.c 2011-05-19 00:06:34.000000000 -0400
69875 +++ linux-2.6.39.4/sound/pci/intel8x0m.c 2011-08-05 20:34:06.000000000 -0400
69876 @@ -1265,7 +1265,7 @@ static struct shortname_table {
69877 { 0x5455, "ALi M5455" },
69878 { 0x746d, "AMD AMD8111" },
69879 #endif
69880 - { 0 },
69881 + { 0, },
69882 };
69883
69884 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
69885 diff -urNp linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c
69886 --- linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-05-19 00:06:34.000000000 -0400
69887 +++ linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-08-05 20:34:06.000000000 -0400
69888 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69889 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69890 break;
69891 }
69892 - if (atomic_read(&chip->interrupt_sleep_count)) {
69893 - atomic_set(&chip->interrupt_sleep_count, 0);
69894 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69895 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69896 wake_up(&chip->interrupt_sleep);
69897 }
69898 __end:
69899 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69900 continue;
69901 init_waitqueue_entry(&wait, current);
69902 add_wait_queue(&chip->interrupt_sleep, &wait);
69903 - atomic_inc(&chip->interrupt_sleep_count);
69904 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
69905 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
69906 remove_wait_queue(&chip->interrupt_sleep, &wait);
69907 }
69908 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
69909 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
69910 spin_unlock(&chip->reg_lock);
69911
69912 - if (atomic_read(&chip->interrupt_sleep_count)) {
69913 - atomic_set(&chip->interrupt_sleep_count, 0);
69914 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69915 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69916 wake_up(&chip->interrupt_sleep);
69917 }
69918 }
69919 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
69920 spin_lock_init(&chip->reg_lock);
69921 spin_lock_init(&chip->voice_lock);
69922 init_waitqueue_head(&chip->interrupt_sleep);
69923 - atomic_set(&chip->interrupt_sleep_count, 0);
69924 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69925 chip->card = card;
69926 chip->pci = pci;
69927 chip->irq = -1;
69928 diff -urNp linux-2.6.39.4/sound/soc/soc-core.c linux-2.6.39.4/sound/soc/soc-core.c
69929 --- linux-2.6.39.4/sound/soc/soc-core.c 2011-05-19 00:06:34.000000000 -0400
69930 +++ linux-2.6.39.4/sound/soc/soc-core.c 2011-08-05 20:34:06.000000000 -0400
69931 @@ -1027,7 +1027,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
69932 }
69933
69934 /* ASoC PCM operations */
69935 -static struct snd_pcm_ops soc_pcm_ops = {
69936 +static snd_pcm_ops_no_const soc_pcm_ops = {
69937 .open = soc_pcm_open,
69938 .close = soc_codec_close,
69939 .hw_params = soc_pcm_hw_params,
69940 @@ -2105,6 +2105,7 @@ static int soc_new_pcm(struct snd_soc_pc
69941
69942 rtd->pcm = pcm;
69943 pcm->private_data = rtd;
69944 + /* this whole logic is broken... */
69945 soc_pcm_ops.mmap = platform->driver->ops->mmap;
69946 soc_pcm_ops.pointer = platform->driver->ops->pointer;
69947 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
69948 diff -urNp linux-2.6.39.4/sound/usb/card.h linux-2.6.39.4/sound/usb/card.h
69949 --- linux-2.6.39.4/sound/usb/card.h 2011-05-19 00:06:34.000000000 -0400
69950 +++ linux-2.6.39.4/sound/usb/card.h 2011-08-05 20:34:06.000000000 -0400
69951 @@ -44,6 +44,7 @@ struct snd_urb_ops {
69952 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69953 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
69954 };
69955 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
69956
69957 struct snd_usb_substream {
69958 struct snd_usb_stream *stream;
69959 @@ -93,7 +94,7 @@ struct snd_usb_substream {
69960 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
69961 spinlock_t lock;
69962
69963 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
69964 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
69965 };
69966
69967 struct snd_usb_stream {
69968 diff -urNp linux-2.6.39.4/tools/gcc/constify_plugin.c linux-2.6.39.4/tools/gcc/constify_plugin.c
69969 --- linux-2.6.39.4/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
69970 +++ linux-2.6.39.4/tools/gcc/constify_plugin.c 2011-08-05 20:34:06.000000000 -0400
69971 @@ -0,0 +1,189 @@
69972 +/*
69973 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
69974 + * Licensed under the GPL v2, or (at your option) v3
69975 + *
69976 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
69977 + *
69978 + * Usage:
69979 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
69980 + * $ gcc -fplugin=constify_plugin.so test.c -O2
69981 + */
69982 +
69983 +#include "gcc-plugin.h"
69984 +#include "config.h"
69985 +#include "system.h"
69986 +#include "coretypes.h"
69987 +#include "tree.h"
69988 +#include "tree-pass.h"
69989 +#include "intl.h"
69990 +#include "plugin-version.h"
69991 +#include "tm.h"
69992 +#include "toplev.h"
69993 +#include "function.h"
69994 +#include "tree-flow.h"
69995 +#include "plugin.h"
69996 +
69997 +int plugin_is_GPL_compatible;
69998 +
69999 +static struct plugin_info const_plugin_info = {
70000 + .version = "20110721",
70001 + .help = "no-constify\tturn off constification\n",
70002 +};
70003 +
70004 +static bool walk_struct(tree node);
70005 +
70006 +static void deconstify_node(tree node)
70007 +{
70008 + tree field;
70009 +
70010 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70011 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70012 + if (code == RECORD_TYPE || code == UNION_TYPE)
70013 + deconstify_node(TREE_TYPE(field));
70014 + TREE_READONLY(field) = 0;
70015 + TREE_READONLY(TREE_TYPE(field)) = 0;
70016 + }
70017 +}
70018 +
70019 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
70020 +{
70021 + if (TREE_CODE(*node) == FUNCTION_DECL) {
70022 + error("%qE attribute does not apply to functions", name);
70023 + *no_add_attrs = true;
70024 + return NULL_TREE;
70025 + }
70026 +
70027 + if (DECL_P(*node) && lookup_attribute("no_const", TYPE_ATTRIBUTES(TREE_TYPE(*node)))) {
70028 + error("%qE attribute is already applied to the type" , name);
70029 + *no_add_attrs = true;
70030 + return NULL_TREE;
70031 + }
70032 +
70033 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(TREE_TYPE(*node))) {
70034 + error("%qE attribute used on type that is not constified" , name);
70035 + *no_add_attrs = true;
70036 + return NULL_TREE;
70037 + }
70038 +
70039 + if (TREE_CODE(*node) == TYPE_DECL) {
70040 + tree chain = TREE_CHAIN(TREE_TYPE(*node));
70041 + TREE_TYPE(*node) = copy_node(TREE_TYPE(*node));
70042 + TREE_CHAIN(TREE_TYPE(*node)) = copy_list(chain);
70043 + TREE_READONLY(TREE_TYPE(*node)) = 0;
70044 + deconstify_node(TREE_TYPE(*node));
70045 + return NULL_TREE;
70046 + }
70047 +
70048 + return NULL_TREE;
70049 +}
70050 +
70051 +static struct attribute_spec no_const_attr = {
70052 + .name = "no_const",
70053 + .min_length = 0,
70054 + .max_length = 0,
70055 + .decl_required = false,
70056 + .type_required = false,
70057 + .function_type_required = false,
70058 + .handler = handle_no_const_attribute
70059 +};
70060 +
70061 +static void register_attributes(void *event_data, void *data)
70062 +{
70063 + register_attribute(&no_const_attr);
70064 +}
70065 +
70066 +/*
70067 +static void printnode(char *prefix, tree node)
70068 +{
70069 + enum tree_code code;
70070 + enum tree_code_class tclass;
70071 +
70072 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
70073 +
70074 + code = TREE_CODE(node);
70075 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
70076 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
70077 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
70078 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
70079 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
70080 +}
70081 +*/
70082 +
70083 +static void constify_node(tree node)
70084 +{
70085 + TREE_READONLY(node) = 1;
70086 +}
70087 +
70088 +static bool is_fptr(tree field)
70089 +{
70090 + tree ptr = TREE_TYPE(field);
70091 +
70092 + if (TREE_CODE(ptr) != POINTER_TYPE)
70093 + return false;
70094 +
70095 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
70096 +}
70097 +
70098 +static bool walk_struct(tree node)
70099 +{
70100 + tree field;
70101 +
70102 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70103 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70104 + if (code == RECORD_TYPE || code == UNION_TYPE) {
70105 + if (!(walk_struct(TREE_TYPE(field))))
70106 + return false;
70107 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
70108 + return false;
70109 + }
70110 + return true;
70111 +}
70112 +
70113 +static void finish_type(void *event_data, void *data)
70114 +{
70115 + tree node = (tree)event_data;
70116 +
70117 + if (node == NULL_TREE)
70118 + return;
70119 +
70120 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
70121 + return;
70122 +
70123 + if (TREE_READONLY(node))
70124 + return;
70125 +
70126 + if (TYPE_FIELDS(node) == NULL_TREE)
70127 + return;
70128 +
70129 + if (walk_struct(node))
70130 + constify_node(node);
70131 +}
70132 +
70133 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70134 +{
70135 + const char * const plugin_name = plugin_info->base_name;
70136 + const int argc = plugin_info->argc;
70137 + const struct plugin_argument * const argv = plugin_info->argv;
70138 + int i;
70139 + bool constify = true;
70140 +
70141 + if (!plugin_default_version_check(version, &gcc_version)) {
70142 + error(G_("incompatible gcc/plugin versions"));
70143 + return 1;
70144 + }
70145 +
70146 + for (i = 0; i < argc; ++i) {
70147 + if (!(strcmp(argv[i].key, "no-constify"))) {
70148 + constify = false;
70149 + continue;
70150 + }
70151 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70152 + }
70153 +
70154 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
70155 + if (constify)
70156 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
70157 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
70158 +
70159 + return 0;
70160 +}
70161 diff -urNp linux-2.6.39.4/tools/gcc/Makefile linux-2.6.39.4/tools/gcc/Makefile
70162 --- linux-2.6.39.4/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
70163 +++ linux-2.6.39.4/tools/gcc/Makefile 2011-08-05 20:34:06.000000000 -0400
70164 @@ -0,0 +1,12 @@
70165 +#CC := gcc
70166 +#PLUGIN_SOURCE_FILES := pax_plugin.c
70167 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
70168 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
70169 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
70170 +
70171 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
70172 +
70173 +hostlibs-y := stackleak_plugin.so constify_plugin.so
70174 +always := $(hostlibs-y)
70175 +stackleak_plugin-objs := stackleak_plugin.o
70176 +constify_plugin-objs := constify_plugin.o
70177 diff -urNp linux-2.6.39.4/tools/gcc/stackleak_plugin.c linux-2.6.39.4/tools/gcc/stackleak_plugin.c
70178 --- linux-2.6.39.4/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
70179 +++ linux-2.6.39.4/tools/gcc/stackleak_plugin.c 2011-08-05 20:34:06.000000000 -0400
70180 @@ -0,0 +1,243 @@
70181 +/*
70182 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
70183 + * Licensed under the GPL v2
70184 + *
70185 + * Note: the choice of the license means that the compilation process is
70186 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
70187 + * but for the kernel it doesn't matter since it doesn't link against
70188 + * any of the gcc libraries
70189 + *
70190 + * gcc plugin to help implement various PaX features
70191 + *
70192 + * - track lowest stack pointer
70193 + *
70194 + * TODO:
70195 + * - initialize all local variables
70196 + *
70197 + * BUGS:
70198 + * - cloned functions are instrumented twice
70199 + */
70200 +#include "gcc-plugin.h"
70201 +#include "plugin-version.h"
70202 +#include "config.h"
70203 +#include "system.h"
70204 +#include "coretypes.h"
70205 +#include "tm.h"
70206 +#include "toplev.h"
70207 +#include "basic-block.h"
70208 +#include "gimple.h"
70209 +//#include "expr.h" where are you...
70210 +#include "diagnostic.h"
70211 +#include "rtl.h"
70212 +#include "emit-rtl.h"
70213 +#include "function.h"
70214 +#include "tree.h"
70215 +#include "tree-pass.h"
70216 +#include "intl.h"
70217 +
70218 +int plugin_is_GPL_compatible;
70219 +
70220 +static int track_frame_size = -1;
70221 +static const char track_function[] = "pax_track_stack";
70222 +static bool init_locals;
70223 +
70224 +static struct plugin_info stackleak_plugin_info = {
70225 + .version = "201106030000",
70226 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
70227 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
70228 +};
70229 +
70230 +static bool gate_stackleak_track_stack(void);
70231 +static unsigned int execute_stackleak_tree_instrument(void);
70232 +static unsigned int execute_stackleak_final(void);
70233 +
70234 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
70235 + .pass = {
70236 + .type = GIMPLE_PASS,
70237 + .name = "stackleak_tree_instrument",
70238 + .gate = gate_stackleak_track_stack,
70239 + .execute = execute_stackleak_tree_instrument,
70240 + .sub = NULL,
70241 + .next = NULL,
70242 + .static_pass_number = 0,
70243 + .tv_id = TV_NONE,
70244 + .properties_required = PROP_gimple_leh | PROP_cfg,
70245 + .properties_provided = 0,
70246 + .properties_destroyed = 0,
70247 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
70248 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
70249 + }
70250 +};
70251 +
70252 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
70253 + .pass = {
70254 + .type = RTL_PASS,
70255 + .name = "stackleak_final",
70256 + .gate = gate_stackleak_track_stack,
70257 + .execute = execute_stackleak_final,
70258 + .sub = NULL,
70259 + .next = NULL,
70260 + .static_pass_number = 0,
70261 + .tv_id = TV_NONE,
70262 + .properties_required = 0,
70263 + .properties_provided = 0,
70264 + .properties_destroyed = 0,
70265 + .todo_flags_start = 0,
70266 + .todo_flags_finish = 0
70267 + }
70268 +};
70269 +
70270 +static bool gate_stackleak_track_stack(void)
70271 +{
70272 + return track_frame_size >= 0;
70273 +}
70274 +
70275 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
70276 +{
70277 + gimple call;
70278 + tree decl, type;
70279 +
70280 + // insert call to void pax_track_stack(void)
70281 + type = build_function_type_list(void_type_node, NULL_TREE);
70282 + decl = build_fn_decl(track_function, type);
70283 + DECL_ASSEMBLER_NAME(decl); // for LTO
70284 + call = gimple_build_call(decl, 0);
70285 + if (before)
70286 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
70287 + else
70288 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
70289 +}
70290 +
70291 +static unsigned int execute_stackleak_tree_instrument(void)
70292 +{
70293 + basic_block bb;
70294 + gimple_stmt_iterator gsi;
70295 +
70296 + // 1. loop through BBs and GIMPLE statements
70297 + FOR_EACH_BB(bb) {
70298 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
70299 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
70300 + tree decl;
70301 + gimple stmt = gsi_stmt(gsi);
70302 +
70303 + if (!is_gimple_call(stmt))
70304 + continue;
70305 + decl = gimple_call_fndecl(stmt);
70306 + if (!decl)
70307 + continue;
70308 + if (TREE_CODE(decl) != FUNCTION_DECL)
70309 + continue;
70310 + if (!DECL_BUILT_IN(decl))
70311 + continue;
70312 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
70313 + continue;
70314 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
70315 + continue;
70316 +
70317 + // 2. insert track call after each __builtin_alloca call
70318 + stackleak_add_instrumentation(&gsi, false);
70319 +// print_node(stderr, "pax", decl, 4);
70320 + }
70321 + }
70322 +
70323 + // 3. insert track call at the beginning
70324 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
70325 + gsi = gsi_start_bb(bb);
70326 + stackleak_add_instrumentation(&gsi, true);
70327 +
70328 + return 0;
70329 +}
70330 +
70331 +static unsigned int execute_stackleak_final(void)
70332 +{
70333 + rtx insn;
70334 +
70335 + if (cfun->calls_alloca)
70336 + return 0;
70337 +
70338 + // 1. find pax_track_stack calls
70339 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
70340 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
70341 + rtx body;
70342 +
70343 + if (!CALL_P(insn))
70344 + continue;
70345 + body = PATTERN(insn);
70346 + if (GET_CODE(body) != CALL)
70347 + continue;
70348 + body = XEXP(body, 0);
70349 + if (GET_CODE(body) != MEM)
70350 + continue;
70351 + body = XEXP(body, 0);
70352 + if (GET_CODE(body) != SYMBOL_REF)
70353 + continue;
70354 + if (strcmp(XSTR(body, 0), track_function))
70355 + continue;
70356 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70357 + // 2. delete call if function frame is not big enough
70358 + if (get_frame_size() >= track_frame_size)
70359 + continue;
70360 + delete_insn_and_edges(insn);
70361 + }
70362 +
70363 +// print_simple_rtl(stderr, get_insns());
70364 +// print_rtl(stderr, get_insns());
70365 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70366 +
70367 + return 0;
70368 +}
70369 +
70370 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70371 +{
70372 + const char * const plugin_name = plugin_info->base_name;
70373 + const int argc = plugin_info->argc;
70374 + const struct plugin_argument * const argv = plugin_info->argv;
70375 + int i;
70376 + struct register_pass_info stackleak_tree_instrument_pass_info = {
70377 + .pass = &stackleak_tree_instrument_pass.pass,
70378 +// .reference_pass_name = "tree_profile",
70379 + .reference_pass_name = "optimized",
70380 + .ref_pass_instance_number = 0,
70381 + .pos_op = PASS_POS_INSERT_AFTER
70382 + };
70383 + struct register_pass_info stackleak_final_pass_info = {
70384 + .pass = &stackleak_final_rtl_opt_pass.pass,
70385 + .reference_pass_name = "final",
70386 + .ref_pass_instance_number = 0,
70387 + .pos_op = PASS_POS_INSERT_BEFORE
70388 + };
70389 +
70390 + if (!plugin_default_version_check(version, &gcc_version)) {
70391 + error(G_("incompatible gcc/plugin versions"));
70392 + return 1;
70393 + }
70394 +
70395 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
70396 +
70397 + for (i = 0; i < argc; ++i) {
70398 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
70399 + if (!argv[i].value) {
70400 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70401 + continue;
70402 + }
70403 + track_frame_size = atoi(argv[i].value);
70404 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
70405 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70406 + continue;
70407 + }
70408 + if (!strcmp(argv[i].key, "initialize-locals")) {
70409 + if (argv[i].value) {
70410 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70411 + continue;
70412 + }
70413 + init_locals = true;
70414 + continue;
70415 + }
70416 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70417 + }
70418 +
70419 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
70420 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
70421 +
70422 + return 0;
70423 +}
70424 diff -urNp linux-2.6.39.4/usr/gen_init_cpio.c linux-2.6.39.4/usr/gen_init_cpio.c
70425 --- linux-2.6.39.4/usr/gen_init_cpio.c 2011-05-19 00:06:34.000000000 -0400
70426 +++ linux-2.6.39.4/usr/gen_init_cpio.c 2011-08-05 19:44:38.000000000 -0400
70427 @@ -305,7 +305,7 @@ static int cpio_mkfile(const char *name,
70428 int retval;
70429 int rc = -1;
70430 int namesize;
70431 - int i;
70432 + unsigned int i;
70433
70434 mode |= S_IFREG;
70435
70436 @@ -394,9 +394,10 @@ static char *cpio_replace_env(char *new_
70437 *env_var = *expanded = '\0';
70438 strncat(env_var, start + 2, end - start - 2);
70439 strncat(expanded, new_location, start - new_location);
70440 - strncat(expanded, getenv(env_var), PATH_MAX);
70441 - strncat(expanded, end + 1, PATH_MAX);
70442 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
70443 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
70444 strncpy(new_location, expanded, PATH_MAX);
70445 + new_location[PATH_MAX] = 0;
70446 } else
70447 break;
70448 }
70449 diff -urNp linux-2.6.39.4/virt/kvm/kvm_main.c linux-2.6.39.4/virt/kvm/kvm_main.c
70450 --- linux-2.6.39.4/virt/kvm/kvm_main.c 2011-05-19 00:06:34.000000000 -0400
70451 +++ linux-2.6.39.4/virt/kvm/kvm_main.c 2011-08-05 20:34:06.000000000 -0400
70452 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
70453
70454 static cpumask_var_t cpus_hardware_enabled;
70455 static int kvm_usage_count = 0;
70456 -static atomic_t hardware_enable_failed;
70457 +static atomic_unchecked_t hardware_enable_failed;
70458
70459 struct kmem_cache *kvm_vcpu_cache;
70460 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
70461 @@ -2187,7 +2187,7 @@ static void hardware_enable_nolock(void
70462
70463 if (r) {
70464 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
70465 - atomic_inc(&hardware_enable_failed);
70466 + atomic_inc_unchecked(&hardware_enable_failed);
70467 printk(KERN_INFO "kvm: enabling virtualization on "
70468 "CPU%d failed\n", cpu);
70469 }
70470 @@ -2241,10 +2241,10 @@ static int hardware_enable_all(void)
70471
70472 kvm_usage_count++;
70473 if (kvm_usage_count == 1) {
70474 - atomic_set(&hardware_enable_failed, 0);
70475 + atomic_set_unchecked(&hardware_enable_failed, 0);
70476 on_each_cpu(hardware_enable_nolock, NULL, 1);
70477
70478 - if (atomic_read(&hardware_enable_failed)) {
70479 + if (atomic_read_unchecked(&hardware_enable_failed)) {
70480 hardware_disable_all_nolock();
70481 r = -EBUSY;
70482 }
70483 @@ -2509,7 +2509,7 @@ static void kvm_sched_out(struct preempt
70484 kvm_arch_vcpu_put(vcpu);
70485 }
70486
70487 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70488 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70489 struct module *module)
70490 {
70491 int r;
70492 @@ -2572,7 +2572,7 @@ int kvm_init(void *opaque, unsigned vcpu
70493 if (!vcpu_align)
70494 vcpu_align = __alignof__(struct kvm_vcpu);
70495 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
70496 - 0, NULL);
70497 + SLAB_USERCOPY, NULL);
70498 if (!kvm_vcpu_cache) {
70499 r = -ENOMEM;
70500 goto out_free_3;
70501 @@ -2582,9 +2582,11 @@ int kvm_init(void *opaque, unsigned vcpu
70502 if (r)
70503 goto out_free;
70504
70505 - kvm_chardev_ops.owner = module;
70506 - kvm_vm_fops.owner = module;
70507 - kvm_vcpu_fops.owner = module;
70508 + pax_open_kernel();
70509 + *(void **)&kvm_chardev_ops.owner = module;
70510 + *(void **)&kvm_vm_fops.owner = module;
70511 + *(void **)&kvm_vcpu_fops.owner = module;
70512 + pax_close_kernel();
70513
70514 r = misc_register(&kvm_dev);
70515 if (r) {